Dataset Viewer
source | python
---|---|
scheduler_test.py
|
import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import pdb
import sqlite3
import threading
import time
import unittest
import myDevices.schedule as schedule
from myDevices.cloud.dbmanager import DbManager
from myDevices.cloud.scheduler import SchedulerEngine
from myDevices.utils.logger import debug, error, exception, info, setDebug, setInfo, warn
class TestClient():
def __init__(self):
info('TestClient init')
self.actions_ran = []
def RunAction(self, action):
info('RunAction: ' + action)
self.actions_ran.append(action)
return True
def SendNotification(self, notification):
info('SendNotification: ' + notification)
class TestHandler(BaseHTTPRequestHandler):
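# Minimal handler used by test_http_notification: it records every payload it receives so the test can compare them against the scheduled http_push payloads.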
def handle_payload(self):
data = self.rfile.read(int(self.headers.get('Content-Length'))).decode('utf-8')
self.server.received.append(json.loads(data))
self.send_response(200)
self.end_headers()
def do_GET(self):
# This should match the payload in test_http_notification
self.server.received.append({'test':'GET request'})
self.send_response(200)
self.end_headers()
def do_POST(self):
self.handle_payload()
def do_PUT(self):
self.handle_payload()
def do_DELETE(self):
# This should match the payload in test_http_notification
self.server.received.append({'test':'DELETE request'})
self.send_response(200)
self.end_headers()
class SchedulerTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
self.schedule_events = []
def tearDown(self):
self.remove_schedules()
self.test_engine.stop()
def add_schedules(self, schedule_events):
for event in schedule_events:
self.test_engine.add_scheduled_event(event, True)
self.schedule_events = self.schedule_events + schedule_events
def remove_schedules(self, engine=None):
scheduled_events = {event['id']:event for event in self.schedule_events if 'id' in event}
for event in scheduled_events.values():
self.assertTrue(self.test_engine.remove_scheduled_event(event))
def check_schedules_added(self, expected):
actual = self.test_engine.get_scheduled_events()
self.assertCountEqual(expected, actual)
def check_schedules_run(self, expected, skip_jobs=()):
print('Pause to allow scheduled events to execute')
expected_to_run = [action for event in expected if event['title'] not in skip_jobs for action in event['actions']]
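# Poll for up to 70 seconds; the test events are scheduled at most ~60 seconds out, so this window should cover one scheduler tick (assumption based on the start_date offsets used in these tests).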
for i in range(70):
time.sleep(1)
if len(expected_to_run) > 0 and len(expected_to_run) == len(self.test_client.actions_ran):
break
self.assertCountEqual(expected_to_run, self.test_client.actions_ran)
def test_missing_id(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
missing_id_event = {'title':'no_id_job', 'actions':['no_id_job_action'], 'config':{'type':'date', 'start_date':start_date}}
self.assertFalse(self.test_engine.add_scheduled_event(missing_id_event, True))
self.assertFalse(self.test_engine.get_scheduled_events())
def test_overwrite_job(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'overwrite_1', 'title':'overwritten_job', 'actions':['overwritten_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'overwrite_1', 'title':'date_job_readd_same_id', 'actions':['date_job_readd_same_id_action'], 'config':{'type':'date', 'start_date':start_date}}]
self.add_schedules(schedule_events)
expected = [event for event in schedule_events if 'id' in event and event['title'] != 'overwritten_job']
self.check_schedules_added(expected)
def test_current_schedules(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'current_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'current_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config': {'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}},
{'id':'current_3', 'title':'every_3_days_job', 'actions':['every_3_days_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':3, 'start_date':start_date}},
{'id':'current_4', 'title':'now_date_job', 'actions':['now_date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'current_5', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':start_date}},
{'id':'current_6', 'title':'bi-weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':2, 'start_date':start_date}},
{'id':'current_7', 'title':'every_4_months_job', 'actions':['every_4_months_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':4, 'start_date':start_date}},
{'id':'current_8', 'title':'every_3_months_job', 'actions':['every_3_months_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':3, 'start_date':now}},
{'id':'current_9', 'title':'hourly_job', 'actions':['hourly_job_action'], 'config': {'type':'interval', 'unit':'hour', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
def test_past_schedules(self):
next_minute = datetime.datetime.utcnow() + datetime.timedelta(seconds=60)
passed_date = datetime.datetime.strftime(datetime.datetime.utcnow() - datetime.timedelta(seconds=120), '%Y-%m-%dT%H:%M:%S.%fZ')
one_day_ago = datetime.datetime.strftime(next_minute - datetime.timedelta(days=1), '%Y-%m-%dT%H:%M:%S.%fZ')
one_week_ago = datetime.datetime.strftime(next_minute - datetime.timedelta(days=7), '%Y-%m-%dT%H:%M:%S.%fZ')
one_month_ago = datetime.datetime.strftime(schedule.month_delta(next_minute, -1), '%Y-%m-%dT%H:%M:%S.%fZ')
one_year_ago = next_minute.replace(year=next_minute.year-1)
one_year_ago = datetime.datetime.strftime(one_year_ago, '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'past_1', 'title':'expired_date_job', 'actions':['expired_date_job_action'], 'config':{'type':'date', 'start_date':passed_date}},
{'id':'past_2', 'title':'daily_job_started_one_day_ago', 'actions':['daily_job_started_one_day_ago_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':one_day_ago}},
{'id':'past_3', 'title':'monthly_job_started_one_month_ago', 'actions':['monthly_job_started_one_month_ago_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':one_month_ago}},
{'id':'past_4', 'title':'yearly_job_started_one_year_ago', 'actions':['yearly_job_started_one_year_ago_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':one_year_ago}},
{'id':'past_5', 'title':'every_2_years_job_started_one_year_ago', 'actions':['every_2_years_job_started_one_year_ago_action'], 'config':{'type':'interval', 'unit':'year', 'interval':2, 'start_date':one_year_ago}},
{'id':'past_6', 'title':'weekly_job_started_one_week_ago', 'actions':['weekly_job_started_one_week_ago_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':one_week_ago}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events, ('expired_date_job', 'every_2_years_job_started_one_year_ago'))
def test_future_schedules(self):
one_day_from_now = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(days=1), '%Y-%m-%dT%H:%M:%S.%fZ')
end_of_month = datetime.datetime.strftime(datetime.datetime(2015,1,31), '%Y-%m-%dT%H:%M:%S.%fZ')
future_month = datetime.datetime.strftime(datetime.datetime(2017,12,31), '%Y-%m-%dT%H:%M:%S.%fZ')
future_year = datetime.datetime.strftime(datetime.datetime(2017,1,1), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'future_1', 'title':'daily_job_starts_one_day_from_now', 'actions':['daily_job_starts_one_day_from_now_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':one_day_from_now}},
{'id':'future_2', 'title':'end_of_month_job', 'actions':['end_of_month_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':end_of_month}},
{'id':'future_3', 'title':'future_month_job', 'actions':['future_month_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':future_month}},
{'id':'future_4', 'title':'future_year_job', 'actions':['future_year_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':future_year}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
skip_jobs = [event['title'] for event in schedule_events]
self.check_schedules_run(schedule_events, skip_jobs)
def test_reload(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'reload_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'reload_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
self.test_engine.stop()
del self.test_engine
del self.test_client
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
for event in schedule_events:
if 'last_run' in event:
del event['last_run']
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events, ('date_job', 'daily_job'))
def test_delayed_load(self):
self.test_engine.stop()
del self.test_engine
del self.test_client
now = datetime.datetime.utcnow()
if (now.second > 35):
print('Sleep until the minute rolls over')
time.sleep(60 - now.second)
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
self.schedule_events = [{'id':'delay_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'delay_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'delay_3', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'delay_4', 'title':'monthly_job', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'delay_5', 'title':'yearly_job', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}}]
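# Write the events straight to the scheduler database so they already exist before the engine is constructed; fall back to an update if a row with the same id is already present.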
for event in self.schedule_events:
event_json = json.dumps(event)
try:
DbManager.Insert('scheduled_events', event['id'], event_json)
except sqlite3.IntegrityError as e:
DbManager.Update('scheduled_events', 'event = ?', event_json, 'id = ?', event['id'])
print('Pause before loading scheduler')
time.sleep(20)
print('Starting scheduler, time is {}'.format(datetime.datetime.utcnow()))
self.test_client = TestClient()
self.test_engine = SchedulerEngine(self.test_client, 'test')
self.check_schedules_run(self.schedule_events)
def test_concurrent_updates(self):
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'concurrent_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'concurrent_1', 'title':'date_job_updated', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':now}},
{'id':'concurrent_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'concurrent_2', 'title':'daily_job_updated', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':now}},
{'id':'concurrent_3', 'title':'weekly_job', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'concurrent_3', 'title':'weekly_job_updated', 'actions':['weekly_job_action'], 'config':{'type':'interval', 'unit':'week', 'interval':1, 'start_date':now}},
{'id':'concurrent_4', 'title':'monthly_job', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'concurrent_4', 'title':'monthly_job_updated', 'actions':['monthly_job_action'], 'config':{'type':'interval', 'unit':'month', 'interval':1, 'start_date':now}},
{'id':'concurrent_5', 'title':'yearly_job', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}},
{'id':'concurrent_5', 'title':'yearly_job_updated', 'actions':['yearly_job_action'], 'config':{'type':'interval', 'unit':'year', 'interval':1, 'start_date':now}}]
for event in schedule_events:
threading.Thread(target=self.add_schedules, daemon=True, args=([event],)).start()
#Only half the schedule_events should run, since events with the same id overwrite previously added ones. Because we don't know
#the order in which that happens, we just check that one of each action has run.
run_events = {event['id']:event for event in schedule_events if 'id' in event}
skip_jobs = [event['title'] for event in run_events.values()]
self.check_schedules_run(schedule_events, skip_jobs)
def test_update_schedules(self):
start_date = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=60), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'update_1', 'title':'date_job', 'actions':['date_job_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'update_2', 'title':'daily_job', 'actions':['daily_job_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.add_schedules(schedule_events)
update_schedule_events = [{'id':'update_3', 'title':'date_job_full_update', 'actions':['date_job_full_update_action'], 'config':{'type':'date', 'start_date':start_date}},
{'id':'update_4', 'title':'daily_job_full_update', 'actions':['daily_job_full_update_action'], 'config':{'type':'interval', 'unit':'day', 'interval':1, 'start_date':start_date}}]
self.assertTrue(self.test_engine.update_scheduled_events(update_schedule_events))
self.schedule_events = update_schedule_events
self.check_schedules_run(update_schedule_events)
def start_http_server(self):
self.server = HTTPServer(('localhost', 8000), TestHandler)
self.server.received = []
self.server.serve_forever()
def test_http_notification(self):
threading.Thread(target=self.start_http_server, daemon=True).start()
now = datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ')
schedule_events = [{'id':'http_1', 'title':'date_get_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'GET', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'GET request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_2', 'title':'date_post_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'POST', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'POST request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_3', 'title':'date_put_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'PUT', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'PUT request'}},
'config':{'type':'date', 'start_date':now}},
{'id':'http_4', 'title':'date_delete_job', 'actions':['date_job_action'],
'http_push':{'url':'http://localhost:8000', 'method':'DELETE', 'headers':{'Content-Type':'application/json'}, 'payload':{'test': 'DELETE request'}},
'config':{'type':'date', 'start_date':now}}]
self.add_schedules(schedule_events)
self.check_schedules_added(schedule_events)
self.check_schedules_run(schedule_events)
self.assertEqual(4, len(self.server.received))
expected = [event['http_push']['payload'] for event in schedule_events]
self.assertCountEqual(expected, self.server.received)
if __name__ == '__main__':
# setDebug()
setInfo()
unittest.main()
# test_suite = unittest.TestSuite()
# # test_suite.addTest(SchedulerTest('test_current_schedules'))
# # test_suite.addTest(SchedulerTest('test_future_schedules'))
# test_suite.addTest(SchedulerTest('test_reload'))
# # test_suite.addTest(SchedulerTest('test_delayed_load'))
# # test_suite.addTest(SchedulerTest('test_http_notification'))
# unittest.TextTestRunner().run(test_suite)
|
backend.py
|
import time
import pandas as pd
import multiprocessing
from task_queue import TaskQueue
import autoshape.autoshape as autoshape
import fixed_ratio.fixed_ratio as fixed_ratio
import fixed_interval.fixed_interval as fixed_interval
from re import sub
# TODO: import other tasks
class Backend:
""" backend functions"""
# TODO: add other tasks to self.tasks
def __init__(self):
self.tasks = {
'autoshape': autoshape,
'fixed_ratio': fixed_ratio,
'fixed_interval': fixed_interval,
}
self.task_queue = TaskQueue(50)
@staticmethod
def read_params():
"""
Reads parameters from parameters.csv
:return: parameters in dictionary and dataframe format
"""
params_df = pd.read_csv('parameters.csv')
params_dict = params_df.to_dict(orient='list')
for key in params_dict.keys():
params_dict[key] = params_dict[key][0]
return params_dict, params_df
@staticmethod
def set_params(params_df: pd.DataFrame):
"""
Writes new parameters to parameters.csv
:param params_df: New parameters dataframe
:return: None
"""
params_df.to_csv('parameters.csv', index=False)
@staticmethod
def read_output(name):
"""
Reads the output of a specified task
:param name: name of task
:return: output in dictionary and dataframe formats
"""
output_df = pd.read_csv(name + '/output.csv')
output_dict = output_df.to_dict(orient='list')
for key in output_dict.keys():
output_dict[key] = output_dict[key][0]
return output_dict, output_df
def start_task(self, params=None):
"""
Starts task specified by parameters
"""
if not params:
params = self.read_params()[0]
name = params['schedule']
length = params['session_length']
proc = multiprocessing.Process(target=self.task_process, name='task_process', args=(name, params,))
proc.start()
print(f'Task timer started: {length} seconds remaining.')
time.sleep(length)
if proc.is_alive():
print(f'Terminating task. {length} seconds elapsed.')
proc.terminate()
proc.join()
print('Task completed.')
return True
def task_process(self, name: str, params: dict):
"""
Helper method for start_task
"""
self.tasks[name].main(params)
def enqueue_task(self):
"""
enqueues task specified by parameters
"""
params = self.read_params()[0]
if self.task_queue.enqueue(params):
return True
else:
return False
def start_queue(self):
"""
Starts task Queue
"""
for i in range(self.task_queue.size):
self.start_task(params=self.task_queue.dequeue())
return True
def get_queue_size(self):
"""
:return: current size of task queue
"""
return self.task_queue.size
@staticmethod
def calc_func(func: str, x: int):
# regex to clean up math expression
func = sub(r'(\d+|x)(x)', r'\1*\2', func)
# returns evaluated python result
return int(eval(func))
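# Illustrative usage (not part of the original module): calc_func('2x + 1', 3) rewrites the expression to '2*x + 1' and evaluates it with the local x, returning 7. Note that eval() trusts the expression, so func should come from a controlled source.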
|
batch_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
def testBasicBatch(self):
"""Tests that a single batched tensor executes together and only once."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
index_t = thread_results[1]
empty_b = main_results[0][0]
empty_m = main_results[1]
else:
batch_t = main_results[0][0]
index_t = main_results[1]
empty_b = thread_results[0][0]
empty_m = thread_results[1]
# Check that both the inputs made it out exactly once.
self.assertAllEqual(sorted(batch_t), (1, 2))
# Check that we get 2 rows in the index tensor.
self.assertEqual(len(index_t), 2)
# Check that the other ones are empty.
self.assertEqual(len(empty_b), 0)
self.assertEqual(len(empty_m), 0)
def testBatchWithPadding(self):
"""Test that batching with padding up to an allowed batch size works."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[5, 10],
grad_timeout_micros=0, batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1, 3]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
else:
batch_t = main_results[0][0]
# Check that the batch tensor incorporates the padding.
self.assertEqual(len(batch_t), 5)
def testMultipleBatch(self):
"""Tests that multiple batched tensors execute together."""
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, _, _ = batch_ops.batch(
[inp0, inp1],
num_batch_threads=1,
max_batch_size=2,
batch_timeout_micros=36000000,
grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched], feed_dict={inp0: [1],
inp1: [2]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0]
empty_t = main_results[0]
else:
batch_t = main_results[0]
empty_t = thread_results[0]
# Assert that the tensors were batched together.
self.assertAllEqual(sorted(batch_t[0]), [1, 2])
self.assertAllEqual(sorted(batch_t[1]), [2, 3])
self.assertAllEqual(empty_t[0], [])
self.assertAllEqual(empty_t[1], [])
def testIllegalBatchDifferentDim0Sizes(self):
"""Tests illegally feeding tensors with different dim0 sizes."""
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp0, inp1], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
with self.assertRaises(Exception) as raised:
_ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
self.assertGreater(
raised.exception.message.find("must have equal 0th-dimension size"),
0)
def testBasicUnbatch(self):
"""Tests that batch and unbatch work together."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
grad_timeout_micros=0, batching_queue="")
computation = batched[0] + 1
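# unbatch routes each row of the batched result back to the session.run call that supplied it, using the index and id tensors emitted by the batch op.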
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBasicUnbatchDecorated(self):
"""Tests that the batch_function decorator works."""
with self.test_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchTimeout(self):
"""Tests that the unbatch timeout works."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
computation = batched[0] + 1
timeout_micros = 10
result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
shared_name="shared_unbatch")
# Set up a parallel pipeline that delays the computation, but uses the
# same unbatch resource object as the non-delayed pipeline.
computation_delayed = script_ops.py_func(delayed_plus1,
[batched[0]],
dtypes.int32)
result_delayed = batch_ops.unbatch(computation_delayed,
index,
id_t,
timeout_micros,
shared_name="shared_unbatch")
thread_results = []
def worker():
# A first call using the non-delayed pipeline. The batcher will send an
# empty tensor along the non-delayed pipeline.
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
time.sleep(0.1) # Ensure the thread's call starts first.
# A second call using the delayed pipeline. The batcher will send the
# batched tensor along the delayed pipeline, thus delaying the arrival of
# the batched tensor at the unbatch op, relative to the empty tensor.
#
# TODO(olston, apassos): Avoid relying on the order in which the batch op
# emits the empty tensor versus the batched one.
_ = sess.run([result_delayed], feed_dict={inp: [2]})
worker_thread.join()
# The thread's call should hit the timeout, and thus get 0 results.
self.assertEqual(len(thread_results), 0)
def testUnbatchGrad(self):
"""Tests that batch and unbatch are differentiable."""
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=1000000,
batching_queue="")
computation = batched[0] * batched[0]
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
grad = gradients_impl.gradients(result, inp)
thread_results = []
def worker():
thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([grad], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
test.main()
|
DataBaseModule.py
|
import tushare as ts
#import os
import pandas as pd
#import time
#import threading
import datetime as dtime
class DataBase:
updateAllShareHistoryDataSum = 0
updateAllShareHistoryDataCount = 0
indexNameMap = {'hs300':399300}
store = None
"""===============================公有函数========================="""
"""
获得沪深300成份股的列表,以权重降序排序
"""
def get_hs300_sharelist(self):
return self.store["hs300_share_list"]
"""
更新所有的股票信息
"""
def update_all_share_history_data(self):
self.__log("update all share history data");
self.__update_share_list_from_internet()
data = self.get_share_list_form_local()
# Drop all stocks that have not yet been listed
data = data[data.timeToMarket != 0]
dataCodes = list(data.index)
for key in self.indexNameMap:
#print(self.indexNameMap[key])
dataCodes.append(self.indexNameMap[key])
#print(dataCodes)
self.updateAllShareHistoryDataSum = len(dataCodes)
#data1 = dataCodes[0:int(self.updateAllShareHistoryDataSum/2)]
#data2 = dataCodes[int(self.updateAllShareHistoryDataSum/2):]
#threading.Thread(target=self.__update_share_history_data_by_codes,args=([data1])).start()
#threading.Thread(target=self.__update_share_history_data_by_codes,args=([data2])).start()
self.__update_share_history_data_by_codes(dataCodes)
"""
获取个股信息,如果有本地数据,先拿本地数据,如果没有本地数据,先更新,再拿本地数据
"""
def get_share_history_data(self,code,startDate=None,endDate=None):
data = self.store.select('share_'+str(code))
if not isinstance(data,pd.DataFrame):
return None
#data = data.iloc[::-1]
data.index = data.date
if startDate is None and endDate is None:
return data
elif startDate is not None and endDate is None:
return data[startDate:startDate]
else:
return data[startDate:endDate]
"""
if startDate is None and endDate is None:
data = self.store.select('share_'+str(code))
data = data.iloc[::-1]
data.index = data.date
if not isinstance(data,pd.DataFrame):
return None
else:
return data
else:
topData = self.store.select('share_'+str(code),start=0,stop=1)
print(topData)
"""
def debug_get_share_history_data(self,code,startDate=None,endDate=None):
try:
if startDate is None and endDate is None:
data = self.store.select('share_'+str(code))
data = data.iloc[::-1]
data.index = data.date
if not isinstance(data,pd.DataFrame):
return None
else:
return data
else:
topData = self.store.select('share_'+str(code),start=0,stop=1)
print(topData)
"""
if topData.empty:
print("start data is emmpty code="+str(code))
return None
topDateStr = topData.index[0]
topDateObj = dtime.datetime.strptime(topDateStr,"%Y-%m-%d")
startDateObj = dtime.datetime.strptime(startDate,"%Y-%m-%d")
del_time = topDateObj - startDateObj
if del_time.days <=0:
start = 0
else:
start = int(del_time.days*0.5)
if endDate is None:
stop = start+10
else:
endDateObj = dtime.datetime.strptime(endDate,"%Y-%m-%d")
del_time2 = startDateObj - endDateObj
stop = int(del_time2.days+3)+start
#print(start)
#print(stop)
data = self.store.select('share_'+str(code),start=start,stop=stop)
if not isinstance(data,pd.DataFrame):
print("data is not dataframe code="+str(code)+" ["+str(start)+" "+str(stop)+"]")
return None
else:
#print(data)
if endDate is None:
ret_data = data.loc[startDate:startDate]
else:
ret_data = data.loc[startDate:endDate]
if ret_data is None or ret_data.empty:
print("DataBase is None or empty"+" code="+str(code)+" ["+startDate+" "+endDate+"]")
return ret_data
"""
except Exception as e:
print(" get_share_history_data except code="+str(code)+" e="+str(e))
return None
"""
获取沪深300指数信息
"""
def get_hs300_data(self):
return self.get_share_history_data(self.indexNameMap['hs300'])
"""
更新个股信息
"""
def update_share_history_data(self,codestr):
code = self.__formtInputCode(codestr)
self.__log("update data from internet code="+code)
data = ts.get_k_data(code)
if isinstance(data,pd.DataFrame) and not data.empty:
self.store['share_'+code] = data
else:
self.store['share_'+code] = pd.DataFrame()
self.__log("update data from internet code="+code+" but not get data")
"""
得到股票名称,代码,等信息列表
"""
def get_share_list_form_local(self):
return self.store['all_share_list']
def update_all_report_data(self):
timeset = self.__get_Q_list()
for time in timeset:
self.__update_report_data(time[0],time[1])
def update_all(self):
self.__update_hs300_sharelist()
self.update_all_share_history_data()
self.store.close()
"""==================================私有函数============================"""
def __date_to_q(self,date):
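# Maps a 'YYYY-MM[-DD]' date string to a (year, quarter) tuple, e.g. '2017-08' -> (2017, 3).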
tmp = date.split('-')
q = 1
if tmp[1] in ['01','02','03']:
q = 1
elif tmp[1] in ['04','05','06']:
q = 2
elif tmp[1] in ['07','08','09']:
q = 3
else:
q = 4
return (int(tmp[0]),q)
def __get_Q_list(self):
now = dtime.datetime.now()
deltalist = [dtime.timedelta(days=-x*30) for x in range(36)]
n_days = [ now + delta for delta in deltalist]
time_list = [ x.strftime('%Y-%m') for x in n_days ]
q_list = [self.__date_to_q(x) for x in time_list]
return set(q_list)
def __update_report_data(self,year,index):
try:
data1 = ts.get_report_data(year,index)
data2 = ts.get_profit_data(year,index)
data3 = ts.get_operation_data(year,index)
data4 = ts.get_growth_data(year,index)
data5 = ts.get_debtpaying_data(year,index)
data6 = ts.get_cashflow_data(year,index)
self.store['report_data_'+str(year)+'_'+str(index)] = data1
self.store['profit_data_'+str(year)+'_'+str(index)] = data2
self.store['operation_data_'+str(year)+'_'+str(index)] = data3
self.store['growth_data_'+str(year)+'_'+str(index)] = data4
self.store['debtpaying_data_'+str(year)+'_'+str(index)] = data5
self.store['cashflow_data_'+str(year)+'_'+str(index)] = data6
except Exception as e:
print("__update_report_data failed for "+str(year)+" Q"+str(index)+": "+str(e))
def __update_hs300_sharelist(self):
print("更新hs300数据")
data = ts.get_hs300s()
if not isinstance(data,pd.DataFrame):
data = pd.DataFrame()
self.store['hs300_share_list'] = data
"""
获取codes列表指明的股票数据
"""
def __update_share_history_data_by_codes(self,codes):
for code in codes:
self.updateAllShareHistoryDataCount += 1
self.update_share_history_data(code)
self.__log("finish "+str(self.updateAllShareHistoryDataCount)+"/"+str(self.updateAllShareHistoryDataSum))
def __update_share_list_from_internet(self):
self.__log("updata share list form internet")
data = ts.get_stock_basics()
self.store['all_share_list'] = data
def __formtInputCode(self,code):
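# Zero-pads a numeric stock code to six characters, e.g. 42 -> '000042'.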
codestr = str(code)
dlen = 6-len(codestr)
while(dlen > 0):
codestr = '0'+codestr
dlen -= 1
return codestr
def __log(self,str):
print("DataBase:"+str)
def __init__(self):
self.__log("---init---")
self.store = pd.HDFStore("hdf_store.hd5")
self.__log('---init end---')
#print(self.store)
def __del__(self):
self.__log("---del---")
if self.store.is_open:
self.__log("close store")
self.store.close()
if __name__ == "__main__":
dataBase = DataBase()
dataBase.update_all_report_data()
#dataBase.update_all()
#print(dataBase.get_hs300_sharelist())
#dataBase._makeLocalShareDataPath(100)
#data = dataBase.get_share_history_data(300024,'2017-07-03','2017-06-03')
#dataBase.update_all_share_history_data()
#data = ts.get_hist_data('399300')
#print(data)
#print(dataBase.get_hs300_info())
|
resources.py
|
from datetime import datetime, timedelta
import time
import random
import subprocess
import os
import os.path
import time
from collections import defaultdict
import json
import logging
import numbers
import yaml
from django.db import models
from django.contrib.auth.models import AbstractUser
import pexpect, getpass
import qmpy
from qmpy.db.custom import DictField
import queue as queue
import threading
logger = logging.getLogger(__name__)
def is_yes(string):
char = string.lower()[0]
if char == 'n':
return False
if char == 'y':
return True
return None
class AllocationError(Exception):
"""Problem with the allocation"""
class SubmissionError(Exception):
"""Failed to submit a job"""
class User(AbstractUser):
"""
User model - stores an oqmd users information.
Relationships:
| :mod:`~qmpy.Account` via account_set
| :mod:`~qmpy.Allocation` via allocation_set
| :mod:`~qmpy.Project` via project_set
Attributes:
| id
| username
| first_name
| last_name
| date_joined
| is_active
| is_staff
| is_superuser
| last_login
| email
"""
class Meta:
app_label = 'qmpy'
db_table = 'users'
@property
def running(self):
return queue.Job.objects.filter(account__user=self, state=1)
@classmethod
def get(cls, name):
try:
return cls.objects.get(username=name)
except cls.DoesNotExist:
return cls(username=name)
@staticmethod
def create():
username = raw_input("Username: ")
email = raw_input("E-mail address: ")
user, new = User.objects.get_or_create(username=username)
if not new:
print 'User by that name exists!'
print 'Please try a new name, or exit with Ctrl-x'
return User.create()
print 'Okay, user created!'
user.save()
user.create_accounts()
#user.assign_allocations()
return user
def create_accounts(self):
msg = 'Would you like to create cluster accounts for this user?'
ans = is_yes(raw_input(msg+' [y/n]: '))
if ans is False:
return
elif ans is None:
print "I didn't understand that command."
return self.create_accounts()
msg = 'Does user %s have an account on %s? [y/n]: '
msg2 = 'What is %s\'s username on %s?: '
msg3 = 'On %s@%s where should calculations be run? (absolute path): '
known = self.account_set.values_list('host__name', flat=True)
for host in Host.objects.exclude(name__in=known):
ans = raw_input(msg % (self.username, host.name))
ans = is_yes(ans)
if ans is False:
continue
uname = raw_input(msg2 % (self.username, host.name))
acct, new = Account.objects.get_or_create(user=self, host=host)
if not new:
print 'Account exists!'
continue
path = raw_input(msg3 % (self.username, host.name))
acct.run_path = path
acct.username = uname.strip()
acct.save()
acct.create_passwordless_ssh()
class Host(models.Model):
"""
Host model - stores all host information for a cluster.
Relationships:
| account
| allocation
Attributes:
| name: Primary key.
| binaries: dict of label:path pairs for vasp binaries.
| check_queue: Path to showq command
| checked_time: datetime object for the last time the queue was
| checked.
| hostname: Full host name.
| ip_address: Full ip address.
| nodes: Total number of nodes.
| ppn: Number of processors per node.
| running: dict of PBS_ID:state pairs.
| sub_script: Path to qsub command
| sub_text: Path to queue file template.
| utilization: Number of active cores (based on showq).
| walltime: Maximum walltime on the machine.
| state: State code. 1=Up, 0=Full (auto-resets to 1 when jobs are
| collected), -1=Down.
"""
name = models.CharField(max_length=63, primary_key=True)
ip_address = models.IPAddressField(null=True)
hostname = models.CharField(max_length=255)
binaries = DictField()
ppn = models.IntegerField(default=8)
nodes = models.IntegerField(default=30)
walltime = models.IntegerField(default=3600*24)
sub_script = models.CharField(max_length=120)
sub_text = models.TextField(default='/usr/local/bin/qsub')
check_queue = models.CharField(max_length=180,
default='/usr/local/maui/bin/showq')
checked_time = models.DateTimeField(default=datetime.min)
running = DictField()
utilization = models.IntegerField(default=0)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'hosts'
def __str__(self):
return self.name
@staticmethod
def create():
"""
Classmethod to create a Host model. Script will ask you questions about
the host to add, and will return the created Host.
"""
host = {}
host['name'] = raw_input('Hostname:')
if Host.objects.filter(name=host['name']).exists():
print 'Host by that name already exists!'
exit(-1)
host['ip_address'] = raw_input('IP Address:')
if Host.objects.filter(ip_address=host['ip_address']).exists():
print 'Host at that address already exists!'
exit(-1)
host['ppn'] = raw_input('Processors per node:')
host['nodes'] = raw_input('Max nodes to run on:')
host['sub_script'] = raw_input('Command to submit a script '
'(e.g. /usr/local/bin/qsub):')
host['check_queue'] = raw_input('Command for showq (e.g.'
'/usr/local/maui/bin/showq):')
host['sub_text'] = raw_input('Path to qfile template:')
h = Host(**host)
h.save()
@classmethod
def get(cls, name):
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def accounts(self):
return list(self.account_set.all())
@property
def jobs(self):
jobs = []
for acct in self.accounts:
jobs += list(acct.job_set.filter(state=1))
return jobs
@property
def active(self):
if self.state < 1:
return False
elif self.utilization > 5*self.nodes*self.ppn:
return False
else:
return True
@property
def percent_utilization(self):
return 100. * float(self.utilization) / (self.nodes*self.ppn)
def get_utilization(self):
util = 0
for acct in self.account_set.all():
for job in acct.job_set.filter(state=1):
util += job.ncpus
self.utilization = util
return util
def get_project(self):
"""
Out of the active projects able to run on this host,
select one at random
Output:
Project, Active project able to run on this host
"""
proj = Project.objects.filter(allocations__host=self, state=1)
proj = proj.filter(task__state=0)
if proj.exists():
return random.choice(list(proj.distinct()))
def get_tasks(self, project=None):
tasks = queue.Task.objects.filter(state=0)
if project is None:
project = self.get_project()
if project is None:
return
tasks = tasks.filter(project_set=project)
tasks = tasks.filter(project_set__allocations__host=self)
tasks = tasks.filter(project_set__users__account__host=self)
return tasks.order_by('priority', 'id')
@property
def qfile(self):
return open(self.sub_text).read()
def get_binary(self, key):
return self.binaries[key]
def _try_login(self, timeout=5.0):
def _login():
self._tmp_acct = Allocation.get('b1004').get_account()
self._tmp_ssh = 'ssh {user}@{host} "{cmd}"'.format(
user=self._tmp_acct.user.username,
host=self._tmp_acct.host.ip_address,
cmd='whoami')
self._tmp_proc = subprocess.Popen(self._tmp_ssh, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = self._tmp_proc.communicate()
if stdout.strip() == self._tmp_acct.user.username:
print "quest is up"
self._tmp_thread = threading.Thread(target=_login)
self._tmp_thread.start()
self._tmp_thread.join(timeout)
if self._tmp_thread.is_alive():
print "unable login on quest"
self._tmp_proc.terminate()
self._tmp_thread.join()
return self._tmp_proc.returncode
def check_host(self):
"""Pings the host to see if it is online. Returns False if it is
offline."""
ret = subprocess.call("ping -c 1 -w 1 %s" % self.ip_address,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
self.state = 1
self.save()
write_resources()
return True
else:
"""Sometimes quest refuses to respond to ping requests. So, try
logging into it using an(y) account. Trying executing a command and
see if it is successful."""
if self.name == 'quest':
if self._try_login() == 0:
self.state = 1
self.save()
write_resources()
return True
self.state = -2
self.save()
return False
@property
def running_now(self):
if not self.state == 1:
return {}
if datetime.now() + timedelta(seconds=-60) > self.checked_time:
self.check_running()
return self.running
def check_running(self):
"""
Uses the hosts data and one of the associated accounts to check the PBS
queue on the Host. If it has been checked in the last 2 minutes, it
will return the previously returned result.
"""
self.checked_time = datetime.now()
if not self.state == 1:
self.running = {}
self.save()
return
account = random.choice(self.accounts)
raw_data = account.execute(self.check_queue)
running = {}
if not raw_data:
return
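# Each job line from showq is expected to contain nine whitespace-separated fields; only the job id, user, state and processor count are kept (assumption based on the parsing below).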
for line in raw_data.split('\n'):
if 'Active Jobs' in line:
continue
line = line.split()
if len(line) != 9:
continue
try:
# < Mohan
if 'Moab' in line[0]:
qid = int(line[0].strip().split('.')[1])
else:
qid = int(line[0])
running[qid] = {
'user':line[1],
'state':line[2],
'proc':int(line[3])}
# Mohan >
except:
pass
self.running = running
self.save()
def get_running(self):
if self.running is not None:
return self.running
else:
return {}
def activate(self):
"""
Allow jobs to be run on this system. Remember to save() to enact change
"""
self.state = 1
def deactivate(self):
"""
Prevent new jobs from being started on this system.
Remember to save() changes
"""
self.state = -1
@property
def utilization_by_project(self):
utilization = defaultdict(int)
for job in self.jobs:
projects = job.task.project_set.all()
for p in projects:
utilization[str(p.name)] += float(job.ncpus)/len(projects)
if self.ppn*self.nodes > sum(utilization.values()):
utilization["Idle"] = self.ppn*self.nodes - sum(utilization.values())
return utilization
@property
def utilization_json(self):
series = []
for k, v in self.utilization_by_project.items():
series.append({'data':v, 'label':k})
return json.dumps(series)
@property
def ncpus(self):
return self.ppn * self.nodes
#===============================================================================#
class Account(models.Model):
"""
Base class for a `User` account on a `Host`.
Attributes:
| host
| id
| job
| run_path
| state
| user
| username
"""
user = models.ForeignKey(User)
host = models.ForeignKey(Host)
username = models.CharField(max_length=255)
run_path = models.TextField()
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'accounts'
def __str__(self):
return '{user}@{host}'.format(user=self.user.username,
host=self.host.name)
@classmethod
def get(cls, user, host):
try:
return Account.objects.get(user=user, host=host)
except cls.DoesNotExist:
return Account(host=host, user=user)
def create_passwordless_ssh(self, key='id_dsa', origin=None):
msg = 'password for {user}@{host}: '
if origin is None:
origin = '/home/{user}/.ssh'.format(user=getpass.getuser())
pas = getpass.getpass(msg.format(user=self.username, host=self.host.name))
msg = '/usr/bin/ssh {user}@{host} touch'
msg += ' /home/{user}/.ssh/authorized_keys'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/scp {origin}/{key} {user}@{host}:/home/{user}/.ssh/'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/ssh {user}@{host}'
msg += ' chmod 600 /home/{user}/.ssh/authorized_keys'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
msg = '/usr/bin/ssh-copy-id -i {origin}/{key} {user}@{host}'
p = pexpect.spawn(msg.format(
origin=origin, key=key,
user=self.username, host=self.host.ip_address))
p.expect('assword:')
p.sendline(pas)
time.sleep(2)
p.close()
print "Great! Let's test it real quick..."
out = self.execute('whoami')
if out == '%s\n' % self.username:
print 'Awesome! It worked!'
else:
print 'Something appears to be wrong, talk to Scott...'
@property
def active(self):
if self.state < 1:
return False
elif not self.host.active:
return False
else:
return True
def submit(self, path=None, run_path=None, qfile=None):
self.execute('mkdir %s' % run_path, ignore_output=True)
self.copy(folder=path, file='*', destination=run_path)
cmd = 'command cd {path} && {sub} {qfile}'.format(
path=run_path,
sub=self.host.sub_script,
qfile=qfile)
stdout = self.execute(cmd)
# < Mohan
tmp = stdout.strip().split()[0]
if 'Moab' in tmp:
jid = int(tmp.split('.')[1])
else:
jid = int(tmp.split('.')[0])
# Mohan >
return jid
def execute(self, command='exit 0', ignore_output=False):
ssh = 'ssh {user}@{host} "{cmd}"'.format(
user=self.username,
host=self.host.ip_address,
cmd=command)
logging.debug(ssh)
call = subprocess.Popen(ssh, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout,stderr = call.communicate()
logging.debug('stdout: %s', stdout)
logging.debug('stderr: %s', stderr)
if stderr and not ignore_output:
logging.warn('WARNING: %s', stderr)
return stdout
def copy(self, destination=None, to=None, # where to send the stuff
fr=None, file=None, folder=None, # what to send
clear_dest_dir=False, move=False): # some conditions on sending it
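# Copies data between this account and 'local' (or another Account). 'file'/'folder' choose what is sent, 'clear_dest_dir' wipes the destination first, and 'move' removes the source after copying.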
if destination is None:
destination = self.run_path
if to is None:
to = self
if fr is None:
if to == 'local':
fr = self
else:
fr = 'local'
assert (isinstance(to, Account) or to == 'local')
assert (isinstance(fr, Account) or fr == 'local')
assert ( not (file is None and folder is None) )
send_dir = False
if file is None:
send_dir = True
elif folder is None:
folder = os.path.dirname(file)
file = os.path.basename(file)
if clear_dest_dir:
if to == 'local':
command = subprocess.Popen('rm -f %s/*' % destination,
shell=True,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = command.communicate()
else:
stdout = self.execute('rm -f %s/*' % destination)
logging.debug('stdout: %s', stdout)
if fr == 'local':
scp = 'scp '
else:
scp = 'scp {user}@{host}:'.format(
user=fr.username, host=fr.host.ip_address)
if not file:
scp += '-r '
if send_dir:
scp += os.path.abspath(folder)
else:
scp += '{path}/{file}'.format(
path=os.path.abspath(folder), file=file)
if to == 'local':
scp += ' '+destination
else:
scp += ' {user}@{host}:{path}'.format(
user=to.username, host=to.host.ip_address,
path=os.path.abspath(destination))
logging.debug('copy command: %s', scp)
cmd = subprocess.Popen(scp,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = cmd.communicate()
logging.debug('stdout: %s', stdout)
logging.debug('stderr: %s', stderr)
if move:
if send_dir:
rmcmd = 'rm -rf {path}'.format(path=os.path.abspath(folder))
else:
rmcmd = 'rm -f {path}/{file}'.format(file=file,
path=os.path.abspath(folder))
logging.debug('wiping source: %s', rmcmd)
stdout = fr.execute(rmcmd)
logging.debug('output: %s', stdout)
#===============================================================================#
class Allocation(models.Model):
"""
Base class for an Allocation on a computing resources.
Attributes:
| host
| job
| key
| name
| project
| state
| users
"""
name = models.CharField(max_length=63, primary_key=True)
key = models.CharField(max_length=100, default='')
host = models.ForeignKey(Host)
users = models.ManyToManyField(User)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'allocations'
def __str__(self):
return self.name
@classmethod
def create(self):
name = raw_input('Name your allocation:')
if Allocation.objects.filter(name=name).exists():
print 'Allocation by that name already exists!'
exit(-1)
host = raw_input('Which cluster is this allocation on?')
if not Host.objects.filter(name=host).exists():
print "This host doesn't exist!"
exit(-1)
host = Host.objects.get(name=host)
alloc = Allocation(name=name, host=host)
alloc.save()
print 'Now we will assign users to this allocation'
for acct in Account.objects.filter(host=host):
inc = raw_input('Can %s use this allocation? y/n [y]:' %
acct.user.username )
if inc == 'y' or inc == '':
alloc.users.add(acct.user)
print 'If this allocation requires a special password, enter',
key = raw_input('it now:')
alloc.key=key
alloc.save()
@classmethod
def get(cls, name):
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def active(self):
if self.state < 1:
return False
elif not self.host.active:
return False
else:
return True
def get_user(self):
return random.choice(self.users.filter(state=1))
def get_account(self, users=None):
if users is None:
users = self.users.all()
user = random.choice(list(users))
return user.account_set.get(host=self.host)
@property
def percent_utilization(self):
return self.host.percent_utilization
#===============================================================================#
class Project(models.Model):
"""
Base class for a project within qmpy.
Attributes:
| allocations
| entry
| name
| priority
| state
| task
| users
"""
name = models.CharField(max_length=63, primary_key=True)
priority = models.IntegerField(default=0)
users = models.ManyToManyField(User)
allocations = models.ManyToManyField(Allocation)
state = models.IntegerField(default=1)
class Meta:
app_label = 'qmpy'
db_table = 'projects'
def __str__(self):
return self.name
@classmethod
def get(cls, name):
if isinstance(name, cls):
return name
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
return cls(name=name)
@property
def completed(self):
return self.task_set.filter(state=2)
@property
def running(self):
return self.task_set.filter(state=1)
@property
def waiting(self):
return self.task_set.filter(state=0).order_by('priority')
@property
def failed(self):
return self.task_set.filter(state=-1)
@staticmethod
def create():
'''
Create a new project. Prompts user on std-in
for name, users, and allocations of this project.
'''
name = raw_input('Name your project: ')
if Project.objects.filter(name=name).exists():
print 'Project by that name already exists!'
exit(-1)
proj = Project(name=name)
proj.save()
proj.priority = int(raw_input('Project priority (1-100): '))
users = raw_input('List project users (e.g. sjk648 jsaal531 bwm291): ')
for u in users.split():
if not User.objects.filter(username=u).exists():
print 'User named', u, 'doesn\'t exist!'
else:
proj.users.add(User.objects.get(username=u))
alloc = raw_input('List project allocations (e.g. byrd victoria b1004): ')
for a in alloc.split():
if not Allocation.objects.filter(name=a).exists():
print 'Allocation named', a, 'doesn\'t exist!'
else:
proj.allocations.add(Allocation.objects.get(name=a))
@property
def active(self):
if self.state < 0:
return False
else:
if self.state != 1:
self.state = 1
self.save()
return True
def get_allocation(self):
available = [ a for a in self.allocations.all() if a.active ]
if available:
return random.choice(available)
else:
return []
# !vih
def write_resources():
current_loc = os.path.dirname(__file__)
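# write_resources dumps the current Host, User/Account, Allocation and Project tables into four YAML files under configuration/resources, each prefixed with a commented template of the expected layout.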
######
# headers for various configuration files
######
hosts_header = """# host1:
# binaries:
# bin_name1: /path/to/bin1
# bin_name2: /path/to/bin2
# check_queue: /full/path/to/showq
# hostname: full.host.name
# ip_address: ###.###.##.###
# nodes: # of nodes on machine
# ppn: # of processors per node
# sub_script: /full/path/to/submission/command
# sub_text: filename for qfile to use a template.
# A file named "filename" must be in configuration/qfiles
# walltime: maximum walltime, in seconds
# host2: ...
"""
f_hosts = open(current_loc+'/../configuration/resources/hosts.yml', 'w')
f_hosts.write(hosts_header)
f_hosts.write('\n')
users_header = """# user1:
# hostname1:
# run_path:/where/to/run/on/host1
# username: usernameonhost1
# hostname2:
# run_path:/where/to/run/on/host2
# username: usernameonhost2
# user2:
# hostname1: ...
"""
f_users = open(current_loc+'/../configuration/resources/users.yml', 'w')
f_users.write(users_header)
f_users.write('\n')
allocations_header = """# allocation1:
# host: hostname
# key: key needed for identifying allocation
# users:
# - user1
# - user2
# allocation2: ...
"""
f_allocations = open(current_loc+'/../configuration/resources/allocations.yml', 'w')
f_allocations.write(allocations_header)
f_allocations.write('\n')
projects_header = """# project1:
# allocations:
# - allocation1
# - allocation2
# priority: Base priority for the project. Lower numbers will be done soonest.
# users:
# - user1
# - user2
# project2: ...
"""
f_projects = open(current_loc+'/../configuration/resources/projects.yml', 'w')
f_projects.write(projects_header)
f_projects.write('\n')
######
# list of values that need to be written into the configuration files
######
host_values = ['binaries', 'check_queue', 'hostname', 'ip_address', \
'nodes', 'ppn', 'sub_script', 'sub_text', 'walltime']
user_values = ['run_path', 'username']
allocation_values = ['host', 'key', 'users']
project_values = ['allocations', 'priority', 'users']
######
# a function to 'clean' the values from type unicode/ long/ etc. to string/ int
######
def clean(val):
if isinstance(val, unicode):
val = str(val)
elif isinstance(val, numbers.Number):
val = int(val)
return val
######
# write host configurations into hosts.yml
######
hosts = Host.objects.all()
dict1 = {}
for h in hosts:
dict2 = {}
for hv in host_values:
dict2[hv] = clean(h.__getattribute__(hv))
dict1[clean(h.name)] = dict2
yaml.dump(dict1, f_hosts, default_flow_style=False)
######
# write user configurations into users.yml
######
users = User.objects.all()
dict1 = {}
for u in users:
dict2 = {}
accounts = Account.objects.filter(user=u)
for a in accounts:
dict2[clean(a.host.name)] = {'run_path':clean(a.run_path), \
'username':clean(a.username)}
dict1[clean(u.username)] = dict2
yaml.dump(dict1, f_users, default_flow_style=False)
######
# write allocation configurations into allocations.yml
######
alloc = Allocation.objects.all()
dict1 = {}
for a in alloc:
dict2 = {}
dict2['host'] = clean(a.host.name)
dict2['key'] = clean(a.key)
dict2['users'] = [ clean(u) for u in a.users.all().values_list('username', flat=True) ]
dict1[clean(a.name)] = dict2
yaml.dump(dict1, f_allocations, default_flow_style=False)
######
# write project configurations into projects.yml
######
pro = Project.objects.all()
dict1 = {}
for p in pro:
dict2 = {}
dict2['allocations'] = [ clean(a) for a in p.allocations.all().values_list('name', flat=True) ]
dict2['priority'] = clean(p.priority)
dict2['users'] = [ clean(u) for u in p.users.all().values_list('username', flat=True) ]
dict1[clean(p.name)] = dict2
yaml.dump(dict1, f_projects, default_flow_style=False)
# close the handles so all YAML output is flushed to disk
f_hosts.close()
f_users.close()
f_allocations.close()
f_projects.close()
|
randomizer.py
|
import spotipy
import os
import spotipy.util as util
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
import requests
os.environ["SPOTIPY_CLIENT_ID"] = ""
os.environ["SPOTIPY_CLIENT_SECRET"] = ""
os.environ["USER"] = ""
SERVER_PORT = 14523
os.environ["SPOTIPY_REDIRECT_URI"] = "http://localhost:{}".format(SERVER_PORT)
scope = 'user-library-read playlist-read-private playlist-read-collaborative playlist-modify-private playlist-modify-public user-follow-read'
class FailedAuth(BaseException):
"""Failed authentication for spotify"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class NotFound(BaseException):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class MyHTTPHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><body><h1 style="text-align:center">Great! Now go back to the python program and insert the URL of this page:</h1><button onclick="copy()" style="margin: 0 auto;display:block">Copy to clipboard</button><textarea id="textarea" style="display: block; margin: 0 auto; width: 60%"></textarea><script>var txt = document.getElementById("textarea"); txt.value = window.location.href;txt.select();function copy() {txt.select();document.execCommand("copy");}</script></body></html>'.encode('utf-8'))
def log_message(self, format, *args):
return
class StoppableSilentHTTPServer(HTTPServer):
stopped = False
def __init__(self, *args, **kw):
HTTPServer.__init__(self, *args, **kw)
def serve_forever(self):
while not self.stopped:
self.handle_request()
def force_stop(self):
self.stopped = True
# Ensure a last run of the thread so it can exit
requests.get(url='http://localhost:{}'.format(SERVER_PORT))
self.server_close()
class SpotifyAuth:
def __init__(self, username):
self._username = username
self._sp = None
self.httpd = None
def wait_for_auth(self):
self.httpd = StoppableSilentHTTPServer(('', SERVER_PORT), MyHTTPHandler)
Thread(target=self.httpd.serve_forever).start()
token = util.prompt_for_user_token(self._username, scope)
if token:
self._sp = spotipy.Spotify(auth=token)
else:
raise FailedAuth("could not obtain a Spotify token for user '{}'".format(self._username))
def get_spotify(self):
return self._sp
def stop_server(self):
self.httpd.force_stop()
def __list_add_tracks__(list_object, tracks):
for item in tracks["items"]:
track = item["track"]
if track["id"] is not None:
list_object.append(track["id"])
return list_object
def __list_add_artist_tracks__(list_object, tracks):
for track in tracks:
if track["id"] is not None:
list_object.append(track["id"])
return list_object
def __add_playlist__(playlist_list, playlists):
for item in playlists["items"]:
playlist_list.append(item)
return playlist_list
def __add_artist__(artist_list, artists):
for item in artists["items"]:
artist_list.append(item)
return artist_list
def __chunk_list__(data, size):
return [data[x:x + size] for x in range(0, len(data), size)]
class SpotifyArtistRandomizer:
""""Randomizes a playlist in spotify"""
def __init__(self, username, sp):
self._username = username
self._sp = sp
self._playlist = None
self._artist = None
self._random_playlist_name = "{} (Randomized)"
def set_playlist_by_name(self, name):
self._playlist = self.__find_playlist__(name)
if self._playlist is None:
raise NotFound("No playlist found")
def __find_playlist__(self, name):
playlists = self.get_all_playlists()
for item in playlists:
if item["name"] == name:
return item
return None
def get_playlist_tracks(self, playlist=None):
if playlist is None:
playlist = self._playlist
track_list = []
result = self._sp.user_playlist(self._username, playlist["id"], fields="tracks,next")
tracks = result["tracks"]
track_list = __list_add_tracks__(track_list, tracks)
while tracks["next"]:
tracks = self._sp.next(tracks)
track_list = __list_add_tracks__(track_list, tracks)
return track_list
def get_artist_tracks(self, artist):
track_list = []
result = self._sp.artist_top_tracks(artist["uri"], country='US')
tracks = result["tracks"]
track_list = __list_add_artist_tracks__(track_list, tracks)
return track_list
def __remove_all_tracks__(self, playlist=None):
if playlist is None and self._playlist is not None:
playlist = self._playlist
elif self._playlist is None:
return
tracks = self.get_playlist_tracks(playlist)
for chunk in __chunk_list__(tracks, 20):
self._sp.user_playlist_remove_all_occurrences_of_tracks(self._username, playlist["id"], chunk)
def __create_artist_playlist__(self):
name = "Top 10 Tracks of followed Artists"
self._playlist = self.__find_playlist__(name)
if self._playlist is None:
self._playlist = self._sp.user_playlist_create(self._username,
name,
False)
return
def get_playlist_size(self, playlist=None):
if playlist is not None:
return playlist["tracks"]["total"]
elif self._playlist is not None:
return self._playlist["tracks"]["total"]
def add_tracks_to_playlist(self, tracks, playlist=None):
if playlist is None and self._playlist is not None:
playlist = self._playlist
elif self._playlist is None:
return
for chunk in __chunk_list__(tracks, 20):
self._sp.user_playlist_add_tracks(self._username, playlist["id"], chunk)
def top10_artist_tracks_playlist(self):
self.__create_artist_playlist__()
if self.get_playlist_size() > 1:
self.__remove_all_tracks__()
track_list = []
artists = self.get_all_artists()
for artist in artists:
track_list += self.get_artist_tracks(artist)
self.add_tracks_to_playlist(track_list)
def get_all_playlists(self):
playlist_list = []
playlists = self._sp.user_playlists(self._username)
__add_playlist__(playlist_list, playlists)
while playlists["next"]:
playlists = self._sp.next(playlists)
__add_playlist__(playlist_list, playlists)
return playlist_list
def get_all_artists(self):
artist_list = []
artists = self._sp.current_user_followed_artists()["artists"]
__add_artist__(artist_list, artists)
while artists["next"]:
artists = self._sp.next(artists)["artists"]
__add_artist__(artist_list, artists)
return artist_list
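# Hedged usage sketch (not part of the original script): one way to wire the
# classes above together. It assumes the SPOTIPY_* and USER environment
# variables set at the top of this file hold valid credentials; otherwise it
# only uses methods defined above.
if __name__ == '__main__':
    auth = SpotifyAuth(os.environ["USER"])
    auth.wait_for_auth()    # starts the local redirect server and blocks until a token is obtained
    auth.stop_server()
    randomizer = SpotifyArtistRandomizer(os.environ["USER"], auth.get_spotify())
    randomizer.top10_artist_tracks_playlist()    # rebuilds the "Top 10 Tracks of followed Artists" playlist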
|
test_common.py
|
from __future__ import annotations
import socket
from typing import TYPE_CHECKING
from unittest.mock import Mock, patch
import pytest
from amqp import RecoverableConnectionError
from kombu import common
from kombu.common import (PREFETCH_COUNT_MAX, Broadcast, QoS, collect_replies,
declaration_cached, generate_oid, ignore_errors,
maybe_declare, send_reply)
from t.mocks import ContextMock, MockPool
if TYPE_CHECKING:
from types import TracebackType
def test_generate_oid():
from uuid import NAMESPACE_OID
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = '%x-%x-%x-%x' % args
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def _get_mock_channel(self):
# Given: A mock Channel with mock'd connection/client/entities
channel = Mock()
channel.connection.client.declared_entities = set()
return channel
def _get_mock_entity(self, is_bound=False, can_cache_declaration=True):
# Given: Unbound mock Entity (will bind to channel when bind is called)
entity = Mock()
entity.can_cache_declaration = can_cache_declaration
entity.is_bound = is_bound
def _bind_entity(channel):
entity.channel = channel
entity.is_bound = True
return entity
entity.bind = _bind_entity
return entity
def test_cacheable(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
entity.auto_delete = False
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Calling maybe_declare default
maybe_declare(entity, channel)
# Then: It called declare on the entity queue and added it to list
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
# When: Calling maybe_declare default (again)
maybe_declare(entity, channel)
# Then: we did not call declare again because its already in our list
assert entity.declare.call_count == 1
# When: Entity channel connection has gone away
entity.channel.connection = None
# Then: maybe_declare must raise a RecoverableConnectionError
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# When: calling maybe_declare with default of no retry policy
maybe_declare(entity, channel)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_binds_entities_when_retry_policy(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# Given: A retry policy
sample_retry_policy = {
'interval_start': 0,
'interval_max': 1,
'max_retries': 3,
'interval_step': 0.2,
'errback': lambda x: "Called test errback retry policy",
}
# When: calling maybe_declare with retry enabled
maybe_declare(entity, channel, retry=True, **sample_retry_policy)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_with_retry(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When calling maybe_declare with retry enabled (default policy)
maybe_declare(entity, channel, retry=True)
# Then: the connection client used ensure to ensure the retry policy
assert channel.connection.client.ensure.call_count
def test_with_retry_dropped_connection(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Entity channel connection has gone away
entity.channel.connection = None
# When: calling maybe_declare with retry
# Then: the RecoverableConnectionError should be raised
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity, channel, retry=True)
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer:
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
self.consumers.discard(self)
class test_itermessages:
class MockConnection:
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
super().__init__(None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
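# Hedged usage sketch (not part of the test module): how the QoS helper that
# the tests above exercise is normally driven. The Mock stands in for a real
# consumer; only the QoS API already shown in the tests is used.
def _qos_usage_example():
    consumer = Mock()
    qos = QoS(consumer.qos, 10)     # callback receives prefetch_count when update() runs
    qos.increment_eventually()      # queue +1 without touching the channel yet
    qos.decrement_eventually(2)     # queue -2
    qos.update()                    # applies the pending value: consumer.qos(prefetch_count=9)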
|
server.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import threading
import time
from collections import defaultdict
from functools import partial
from socketserver import ThreadingMixIn
from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
from ..core._imperative_rt.utils import create_mm_server
from ..utils.future import Future
class Methods:
"""
Distributed Server Method.
Used to exchange information between distributed nodes.
:param mm_server_port: multiple machine rpc server port.
"""
def __init__(self, mm_server_port):
self.lock = threading.Lock()
self.mm_server_port = mm_server_port
self.dict_is_grad = defaultdict(partial(Future, True))
self.dict_remote_tracer = defaultdict(partial(Future, True))
self.dict_pack_list = defaultdict(partial(Future, False))
self.dict_barrier_counter = defaultdict(int)
self.dict_barrier_event = defaultdict(threading.Event)
self.user_dict = defaultdict(partial(Future, False))
self.bcast_dict = {}
def connect(self):
"""Method for checking connection success."""
return True
def get_mm_server_port(self):
"""Get multiple machine rpc server port."""
return self.mm_server_port
def set_is_grad(self, key, is_grad):
"""
Mark whether the send/recv op identified by key needs gradients.
:param key: key to match send/recv op.
:param is_grad: whether this op needs grad.
"""
with self.lock:
future = self.dict_is_grad[key]
future.set(is_grad)
return True
def check_is_grad(self, key):
"""
Check whether the send/recv op identified by key needs gradients.
:param key: key to match send/recv op.
"""
with self.lock:
future = self.dict_is_grad[key]
ret = future.get()
with self.lock:
del self.dict_is_grad[key]
return ret
def set_remote_tracer(self, key, tracer_set):
"""
Set tracer dict for tracing send/recv op.
:param key: key to match send/recv op.
:param tracer_set: valid tracer set.
"""
with self.lock:
future = self.dict_remote_tracer[key]
future.set(tracer_set)
return True
def check_remote_tracer(self, key):
"""
Get tracer dict for send/recv op.
:param key: key to match send/recv op.
"""
with self.lock:
future = self.dict_remote_tracer[key]
ret = future.get()
with self.lock:
del self.dict_remote_tracer[key]
return ret
def group_barrier(self, key, size):
"""
A barrier that waits for all group members.
:param key: group key to match each other.
:param size: group size.
"""
with self.lock:
self.dict_barrier_counter[key] += 1
counter = self.dict_barrier_counter[key]
event = self.dict_barrier_event[key]
if counter == size:
del self.dict_barrier_counter[key]
del self.dict_barrier_event[key]
event.set()
else:
event.wait()
return True
def user_set(self, key, val):
"""Set user defined key-value pairs across processes."""
with self.lock:
future = self.user_dict[key]
future.set(val)
return True
def user_get(self, key):
"""Get user defined key-value pairs across processes."""
with self.lock:
future = self.user_dict[key]
return future.get()
def bcast_val(self, val, key, size):
with self.lock:
if key not in self.bcast_dict:
self.bcast_dict[key] = [Future(False), size]
arr = self.bcast_dict[key]
if val is not None:
arr[0].set(val)
val = None
else:
val = arr[0].get()
with self.lock:
cnt = arr[1] - 1
arr[1] = cnt
if cnt == 0:
del self.bcast_dict[key]
return val
def _del(self, key):
with self.lock:
del self.user_dict[key]
# thread safe function
def user_pop(self, key):
ret = self.user_get(key)
self._del(key)
return ret
class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
def _start_server(py_server_port, queue):
"""
Start python distributed server and multiple machine server.
:param py_server_port: python server port.
:param queue: the chosen (py_server_port, mm_server_port) pair is put in this queue; an exception is put instead if the process fails.
"""
try:
mm_server_port = create_mm_server("0.0.0.0", 0)
server = ThreadXMLRPCServer(
("0.0.0.0", py_server_port), logRequests=False, allow_none=True
)
server.register_instance(Methods(mm_server_port))
_, py_server_port = server.server_address
queue.put((py_server_port, mm_server_port))
server.serve_forever()
except Exception as e:
queue.put(e)
class Server:
"""
Distributed Server for distributed training.
Should run on the master node.
:param port: python server port.
"""
def __init__(self, port=0):
q = mp.Queue()
self.proc = mp.Process(target=_start_server, args=(port, q), daemon=True)
self.proc.start()
ret = q.get()
if isinstance(ret, Exception):
raise ret
else:
self.py_server_port, self.mm_server_port = ret
def __del__(self):
self.proc.terminate()
class Client:
"""
Distributed Client for distributed training.
:param master_ip: ip address of master node.
:param port: port of server at master node.
"""
def __init__(self, master_ip, port):
self.master_ip = master_ip
self.port = port
self.connect()
self.bcast_dict = defaultdict(lambda: 0)
def connect(self):
"""Check connection success."""
while True:
try:
self.proxy = ServerProxy(
"http://{}:{}".format(self.master_ip, self.port), allow_none=True
)
if self.proxy.connect():
break
except Exception:
time.sleep(1)
def get_mm_server_port(self):
"""Get multiple machine server port."""
return self.proxy.get_mm_server_port()
def set_is_grad(self, key, is_grad):
"""
Mark whether the send/recv op identified by key needs gradients.
:param key: key to match send/recv op.
:param is_grad: whether this op needs grad.
"""
self.proxy.set_is_grad(key, is_grad)
def check_is_grad(self, key):
"""
Check whether the send/recv op identified by key needs gradients.
:param key: key to match send/recv op.
"""
return self.proxy.check_is_grad(key)
def set_remote_tracer(self, key, tracer_set):
"""
Set tracer dict for tracing send/recv op.
:param key: key to match send/recv op.
:param tracer_set: valid tracer set.
"""
self.proxy.set_remote_tracer(key, tracer_set)
def check_remote_tracer(self, key):
"""
Get tracer dict for send/recv op.
:param key: key to match send/recv op.
"""
return self.proxy.check_remote_tracer(key)
def group_barrier(self, key, size):
"""
A barrier that waits for all group members.
:param key: group key to match each other.
:param size: group size.
"""
self.proxy.group_barrier(key, size)
def user_set(self, key, val):
"""Set user defined key-value pairs across processes."""
return self.proxy.user_set(key, val)
def user_get(self, key):
"""Get user defined key-value pairs across processes."""
return self.proxy.user_get(key)
def user_pop(self, key):
"""Get user defined key-value pairs and delete the resources when the get is done"""
return self.proxy.user_pop(key)
def bcast_val(self, val, key, size):
idx = self.bcast_dict[key] + 1
self.bcast_dict[key] = idx
key = key + "_bcast_" + str(idx)
return self.proxy.bcast_val(val, key, size)
def main(port=0, verbose=True):
mm_server_port = create_mm_server("0.0.0.0", 0)
server = ThreadXMLRPCServer(("0.0.0.0", port), logRequests=verbose)
server.register_instance(Methods(mm_server_port))
_, port = server.server_address
print("serving on port", port)
server.serve_forever()
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--port", type=int, default=0)
ap.add_argument("-v", "--verbose", type=bool, default=True)
args = ap.parse_args()
main(port=args.port, verbose=args.verbose)
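# Hedged usage sketch (not part of the original module): how the Server/Client
# pair above is typically used in a training job. The host name, barrier key,
# and single-worker sizes are placeholders, not values mandated by this file.
def _example_usage():
    server = Server(port=23456)                      # started once, on the master node
    client = Client("localhost", server.py_server_port)
    client.group_barrier("init", size=1)             # every worker joins the barrier with the same key
    client.user_set("world_size", 1)
    assert client.user_get("world_size") == 1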
|
build.py
| "## @file\r\n# build a platform or a module\r\n#\r\n# Copyright (c) 2014, Hewlett-Packard Developme(...TRUNCATED) |
__init__.py
| "from .app.webserver.app import app\nfrom .app.tgbot.server import run\nimport os\nimport time\nimpo(...TRUNCATED) |