| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
    print('Run task %s (%s)...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s runs %0.2f seconds.' % (name, (end - start)))
if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
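    # A note on sizing (assumption: CPython's multiprocessing): Pool() with no
    # argument defaults to os.cpu_count() worker processes; pass an explicit
    # size such as Pool(5) to run more tasks concurrently.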
p = Pool()
for i in range(5):
p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
p.close()
p.join()
    print('All subprocesses done.')
"""
Notes on the code:

Calling join() on a Pool object waits for all worker processes to finish;
close() must be called before join(), and once close() has been called no
new Process can be added to the pool.

Note the output: tasks 0, 1, 2 and 3 start immediately, while task 4 only
starts after one of the earlier tasks has finished. This is because the
default Pool size on this machine is 4, so at most four processes run at
the same time. That is a deliberate limit of Pool itself, not of the
operating system. If you change it to:
p = Pool(5)
""" | Jayin/practice_on_py | Process&Thread/PoolTest.py | Python | mit | 1,094 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import sys
from os import path, remove, urandom
import platform
import uuid
from azure.storage.blob import (
BlobServiceClient,
ContainerClient,
BlobClient,
ContentSettings
)
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
from settings.testcase import BlobPreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'largeblob'
LARGE_BLOB_SIZE = 12 * 1024 * 1024
LARGE_BLOCK_SIZE = 6 * 1024 * 1024
# ------------------------------------------------------------------------------
if platform.python_implementation() == 'PyPy':
pytest.skip("Skip tests for Pypy", allow_module_level=True)
class StorageLargeBlockBlobTest(StorageTestCase):
def _setup(self, storage_account_name, key):
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.bsc = BlobServiceClient(
self.account_url(storage_account_name, "blob"),
credential=key,
max_single_put_size=32 * 1024,
max_block_size=2 * 1024 * 1024,
min_large_block_upload_threshold=1 * 1024 * 1024)
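        # With these settings a 12 MB upload (LARGE_BLOB_SIZE) splits into six
        # 2 MB blocks, exercising the chunked-upload path without huge payloads.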
self.config = self.bsc._config
self.container_name = self.get_resource_name('utcontainer')
if self.is_live:
try:
self.bsc.create_container(self.container_name)
except:
pass
def _teardown(self, file_name):
if path.isfile(file_name):
try:
remove(file_name)
except:
pass
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob.upload_blob(b'')
return blob
def assertBlobEqual(self, container_name, blob_name, expected_data):
blob = self.bsc.get_blob_client(container_name, blob_name)
actual_data = blob.download_blob()
self.assertEqual(b"".join(list(actual_data.chunks())), expected_data)
# --Test cases for block blobs --------------------------------------------
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_bytes_large(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'), urandom(LARGE_BLOCK_SIZE))
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_bytes_large_with_md5(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
urandom(LARGE_BLOCK_SIZE),
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_stream_large(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@BlobPreparer()
def test_put_block_stream_large_with_md5(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE,
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_path(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'large_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, overwrite=True)
block_list = blob.get_block_list()
# Assert
        self.assertNotEqual(len(block_list), 0)
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_path_with_md5(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_md5.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, validate_content=True, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_path_non_parallel(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(self.get_random_bytes(100))
FILE_PATH = "blob_from_path_non_parallel.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_path_with_progress(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_progress.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_path_with_properties(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_path_with_properties.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_stream_chunked_upload(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_stream_chunked_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_creat_lrgblob_frm_stream_w_progress_chnkd_upload(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'stream_w_progress_chnkd_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_create_large_blob_from_stream_chunked_upload_with_count(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'chunked_upload_with_count.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, length=blob_size, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_creat_lrgblob_frm_strm_chnkd_uplod_w_count_n_props(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'plod_w_count_n_props.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(
stream, length=blob_size, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
def test_creat_lrg_blob_frm_stream_chnked_upload_w_props(self, storage_account_name, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account_name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'creat_lrg_blob.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
# ------------------------------------------------------------------------------ | Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/tests/test_large_block_blob.py | Python | mit | 16,306 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import re
import subprocess
import sys
from datetime import date
import click
import yaml
from indico.util.console import cformat
# Dictionary listing the files for which to change the header.
# The key is the extension of the file (without the dot) and the value is another
# dictionary containing two keys:
# - 'regex' : A regular expression matching comments in the given file type
# - 'format': A dictionary with the comment characters to add to the header.
# There must be a `comment_start` inserted before the header,
# `comment_middle` inserted at the beginning of each line except the
# first and last one, and `comment_end` inserted at the end of the
# header. (See the `HEADER` above)
SUPPORTED_FILES = {
'py': {
'regex': re.compile(r'((^#|[\r\n]#).*)*'),
'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
'wsgi': {
'regex': re.compile(r'((^#|[\r\n]#).*)*'),
'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
'js': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
'jsx': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
'css': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/'),
'format': {'comment_start': '/*', 'comment_middle': ' *', 'comment_end': ' */'}},
'scss': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
}
# The substring that must appear in a comment block for that comment to be replaced with the header.
SUBSTRING = 'This file is part of'
USAGE = '''
Updates all the headers in the supported files ({supported_files}).
By default, all the files tracked by git in the current repository are updated
to the current year.
You can specify a year to update to as well as a file or directory.
This will update all the supported files in the scope including those not tracked
by git. If the directory does not contain any supported files (or if the file
specified is not supported) nothing will be updated.
'''.format(supported_files=', '.join(SUPPORTED_FILES)).strip()
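# Example invocations (paths below are illustrative):
#   python update_header.py                   # update every git-tracked file to the current year
#   python update_header.py -y 2021 -p bin/   # update the files under bin/ to 2021
#   python update_header.py --ci              # check only; exit non-zero if anything is outdated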
def _walk_to_root(path):
"""Yield directories starting from the given directory up to the root."""
# Based on code from python-dotenv (BSD-licensed):
# https://github.com/theskumar/python-dotenv/blob/e13d957b/src/dotenv/main.py#L245
if os.path.isfile(path):
path = os.path.dirname(path)
last_dir = None
current_dir = os.path.abspath(path)
while last_dir != current_dir:
yield current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
last_dir, current_dir = current_dir, parent_dir
def _get_config(path, end_year):
config = {}
for dirname in _walk_to_root(path):
check_path = os.path.join(dirname, 'headers.yml')
if os.path.isfile(check_path):
with open(check_path) as f:
config.update((k, v) for k, v in yaml.safe_load(f.read()).items() if k not in config)
if config.pop('root', False):
break
if 'start_year' not in config:
click.echo('no valid headers.yml files found: start_year missing')
sys.exit(1)
if 'name' not in config:
click.echo('no valid headers.yml files found: name missing')
sys.exit(1)
if 'header' not in config:
click.echo('no valid headers.yml files found: header missing')
sys.exit(1)
config['end_year'] = end_year
return config
def gen_header(data):
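    # e.g. start_year=2002 and end_year=2021 render the header with dates '2002 - 2021';
    # equal years collapse to the single value.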
if data['start_year'] == data['end_year']:
data['dates'] = data['start_year']
else:
data['dates'] = '{} - {}'.format(data['start_year'], data['end_year'])
return '\n'.join(line.rstrip() for line in data['header'].format(**data).strip().splitlines())
def _update_header(file_path, config, substring, regex, data, ci):
found = False
with open(file_path) as file_read:
content = orig_content = file_read.read()
if not content.strip():
return False
shebang_line = None
if content.startswith('#!/'):
shebang_line, content = content.split('\n', 1)
for match in regex.finditer(content):
if substring in match.group():
found = True
content = content[:match.start()] + gen_header(data | config) + content[match.end():]
if shebang_line:
content = shebang_line + '\n' + content
if content != orig_content:
msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
if not ci:
with open(file_path, 'w') as file_write:
file_write.write(content)
return True
elif not found:
msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
return True
def update_header(file_path, year, ci):
config = _get_config(file_path, year)
ext = file_path.rsplit('.', 1)[-1]
if ext not in SUPPORTED_FILES or not os.path.isfile(file_path):
return False
if os.path.basename(file_path)[0] == '.':
return False
return _update_header(file_path, config, SUBSTRING, SUPPORTED_FILES[ext]['regex'],
SUPPORTED_FILES[ext]['format'], ci)
def blacklisted(root, path, _cache={}):
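    # The mutable default `_cache` is intentional: it persists across calls and
    # memoizes whether a directory is covered by a .no-headers marker file.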
orig_path = path
if path not in _cache:
_cache[orig_path] = False
while (path + os.path.sep).startswith(root):
if os.path.exists(os.path.join(path, '.no-headers')):
_cache[orig_path] = True
break
path = os.path.normpath(os.path.join(path, '..'))
return _cache[orig_path]
@click.command(help=USAGE)
@click.option('--ci', is_flag=True, help='Indicate that the script is running during CI and should use a non-zero '
'exit code unless all headers were already up to date. This also prevents '
'files from actually being updated.')
@click.option('--year', '-y', type=click.IntRange(min=1000), default=date.today().year, metavar='YEAR',
help='Indicate the target year')
@click.option('--path', '-p', type=click.Path(exists=True), help='Restrict updates to a specific file or directory')
@click.pass_context
def main(ctx, ci, year, path):
error = False
if path and os.path.isdir(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all the files in '
'%{yellow!}{path}%{reset}...').format(year=year, path=path))
for root, _, filenames in os.walk(path):
for filename in filenames:
if not blacklisted(path, root):
if update_header(os.path.join(root, filename), year, ci):
error = True
elif path and os.path.isfile(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for the file '
'%{yellow!}{file}%{reset}...').format(year=year, file=path))
if update_header(path, year, ci):
error = True
else:
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all '
'git-tracked files...').format(year=year))
try:
for filepath in subprocess.check_output(['git', 'ls-files'], text=True).splitlines():
filepath = os.path.abspath(filepath)
if not blacklisted(os.getcwd(), os.path.dirname(filepath)):
if update_header(filepath, year, ci):
error = True
except subprocess.CalledProcessError:
raise click.UsageError(cformat('%{red!}You must be within a git repository to run this script.'))
if not error:
print(cformat('%{green}\u2705 All headers are up to date'))
elif ci:
print(cformat('%{red}\u274C Some headers need to be updated or added'))
sys.exit(1)
else:
print(cformat('%{yellow}\U0001F504 Some headers have been updated (or are missing)'))
if __name__ == '__main__':
main()
| ThiefMaster/indico | bin/maintenance/update_header.py | Python | mit | 8,843 |
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
        except Language.DoesNotExist:
            print('language not available:', language)
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
        paste.save()
        # save() populates the primary key, so re-querying with objects.latest() is unnecessary
        return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')
| spezifanta/Paste-It | api/v01/views.py | Python | mit | 749 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wellspring.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ushatil/wellness-tracker | ws/manage.py | Python | mit | 253 |
class Sprite(object):
def __init__(self, xPos, yPos):
self.x = xPos
self.y = yPos
self.th = 32
self.tw = 32
def checkCollision(self, otherSprite):
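        # standard axis-aligned bounding-box (AABB) overlap test between two sprites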
if (self.x < otherSprite.x + otherSprite.tw and otherSprite.x < self.x + self.tw
and self.y < otherSprite.y + otherSprite.th and otherSprite.y < self.y + self.th):
return True
else:
return False
class Actor(Sprite):
def __init__(self, xPos, yPos):
super(Actor, self).__init__(xPos, yPos)
self.speed = 5
self.dy = 0
self.d = 3
self.dir = "right"
# self.newdir = "right"
self.state = "standing"
self.walkR = []
self.walkL = []
def loadPics(self):
self.standing = loadImage("gripe_stand.png")
self.falling = loadImage("grfalling.png")
for i in range(8):
imageName = "gr" + str(i) + ".png"
self.walkR.append(loadImage(imageName))
for i in range(8):
imageName = "gl" + str(i) + ".png"
self.walkL.append(loadImage(imageName))
def checkWall(self, wall):
if wall.state == "hidden":
if (self.x >= wall.x - self.d and
(self.x + 32 <= wall.x + 32 + self.d)):
return False
def move(self):
if self.dir == "right":
if self.state == "walking":
self.im = self.walkR[frameCount % 8]
self.dx = self.speed
elif self.state == "standing":
self.im = self.standing
self.dx = 0
elif self.state == "falling":
self.im = self.falling
self.dx = 0
self.dy = 5
elif self.dir == "left":
if self.state == "walking":
self.im = self.walkL[frameCount % 8]
self.dx = -self.speed
elif self.state == "standing":
self.im = self.standing
self.dx = 0
elif self.state == "falling":
self.im = self.falling
self.dx = 0
self.dy = 5
else:
self.dx = 0
self.x += self.dx
self.y += self.dy
if self.x <= 0:
self.x = 0
if self.x >= 640 - self.tw:
            self.x = 640 - self.tw
def display(self):
image(self.im, self.x, self.y)
class Block(Sprite):
def __init__(self, xPos, yPos):
super(Block, self).__init__(xPos, yPos)
self.state = "visible"
def loadPics(self):
self.im = loadImage("block.png")
def display(self):
if self.state == "visible":
image(self.im, self.x, self.y)
| kantel/processingpy | sketches/Apple_Invaders/sprites.py | Python | mit | 2,805 |
# -*- coding: utf-8 -*-
import unittest
from hanspell import spell_checker
from hanspell.constants import CheckResult
from textwrap import dedent as trim
class SpellCheckerTests(unittest.TestCase):
def setUp(self):
pass
def test_basic_check(self):
        result = spell_checker.check(u'안녕 하세요. 저는 한국인 입니다. 이문장은 한글로 작성됬습니다.')
        assert result.errors == 4
        assert result.checked == u'안녕하세요. 저는 한국인입니다. 이 문장은 한글로 작성됐습니다.'
def test_words(self):
        result = spell_checker.check(u'한아이가 장난깜을 갖고놀고있다. 그만하게 할가?')
assert result.errors == 4
items = result.words
        assert items[u'한'] == CheckResult.WRONG_SPACING
        assert items[u'아이가'] == CheckResult.WRONG_SPACING
        assert items[u'장난감을'] == CheckResult.STATISTICAL_CORRECTION
        assert items[u'갖고'] == CheckResult.WRONG_SPACING
        assert items[u'놀고'] == CheckResult.WRONG_SPACING
        assert items[u'있다.'] == CheckResult.WRONG_SPACING
        assert items[u'그만하게'] == CheckResult.PASSED
        assert items[u'할까?'] == CheckResult.WRONG_SPELLING
def test_list(self):
        results = spell_checker.check([u'안녕 하세요.', u'저는 한국인 입니다.'])
        assert results[0].checked == u'안녕하세요.'
        assert results[1].checked == u'저는 한국인입니다.'
def test_long_paragraph(self):
paragraph = trim("""
        ubit.info(유빗인포)는 코나미 리듬게임, 유비트의 플레이 데이터 관리 및 열람 서비스입니다. 등록 하면 자신과 친구의 기록을 p.eagate.573.jp에 접속할 필요 없이 본 웹 사이트에서 바로 확인할 수 있습니다.
        등록 후에는 "https://ubit.info/별칭"으로 자신의 개인 페이지가 생성되며 이 주소(별칭)를 아는 사람만 접속할 수 있습니다. 다른 친구에게 기록을 보여주고 싶다면 본인의 인포 주소를 알려주면 됩니다.
        이 사이트는 최신 브라우저 환경만을 제대로 지원합니다. 만약 크롬, 파이어폭스 등의 최신 브라우저 안정 버전(stable)을 사용하고 있는데도 페이지 레이아웃이 깨지는 경우 사이트 관리자에게 문의해주세요.
        등록 과정은 간단합니다. 상단 메뉴에서 등록을 클릭한 후 양식에 맞게 입력하시면 자동으로 공개설정이 완료됨과 동시에 유빗인포 계정이 생성됩니다.
""")
result = spell_checker.check(paragraph)
if __name__ == '__main__':
unittest.main()
| ssut/py-hanspell | tests.py | Python | mit | 2,729 |
__author__ = 'brianoneill'
from log_calls import log_calls
global_settings = dict(
log_call_numbers=True,
log_exit=False,
log_retval=True,
)
log_calls.set_defaults(global_settings, args_sep=' $ ')
| Twangist/log_calls | tests/set_reset_defaults/global_defaults.py | Python | mit | 211 |
"""
Given a string that contains only digits 0-9 and a target value, return all possibilities to add binary operators (not
unary) +, -, or * between the digits so they evaluate to the target value.
Examples:
"123", 6 -> ["1+2+3", "1*2*3"]
"232", 8 -> ["2*3+2", "2+3*2"]
"105", 5 -> ["1*0+5","10-5"]
"00", 0 -> ["0+0", "0-0", "0*0"]
"3456237490", 9191 -> []
"""
__author__ = 'Daniel'
class Solution(object):
def addOperators(self, num, target):
"""
Adapted from https://leetcode.com/discuss/58614/java-standard-backtrace-ac-solutoin-short-and-clear
Algorithm:
1. DFS
2. Special handling for multiplication
3. Detect invalid number with leading 0's
:type num: str
:type target: int
:rtype: List[str]
"""
ret = []
self.dfs(num, target, 0, "", 0, 0, ret)
return ret
def dfs(self, num, target, pos, cur_str, cur_val, mul, ret):
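        # `mul` carries the value of the last multiplicative term so that '*' is
        # applied with correct precedence: appending '*d' re-evaluates that term as
        # cur_val - mul + mul * d, e.g. "2+3" then "*2" gives 5 - 3 + 3*2 = 8.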
if pos >= len(num):
if cur_val == target:
ret.append(cur_str)
else:
            for i in range(pos, len(num)):
if i != pos and num[pos] == "0":
continue
nxt_val = int(num[pos:i+1])
if not cur_str:
self.dfs(num, target, i+1, "%d"%nxt_val, nxt_val, nxt_val, ret)
else:
self.dfs(num, target, i+1, cur_str+"+%d"%nxt_val, cur_val+nxt_val, nxt_val, ret)
self.dfs(num, target, i+1, cur_str+"-%d"%nxt_val, cur_val-nxt_val, -nxt_val, ret)
self.dfs(num, target, i+1, cur_str+"*%d"%nxt_val, cur_val-mul+mul*nxt_val, mul*nxt_val, ret)
if __name__ == "__main__":
assert Solution().addOperators("232", 8) == ["2+3*2", "2*3+2"]
| algorhythms/LeetCode | 282 Expression Add Operators.py | Python | mit | 1,769 |
from django.contrib import admin
from .models import Question
# Register your models here.
admin.site.register(Question)
| BeardedPlatypus/capita-selecta-ctf | ctf/players/admin.py | Python | mit | 126 |
from django.conf.urls import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^logout', views.logout, name='logout'),
url(r'^newUser', views.newUser, name='newUser'),
url(r'^appHandler', views.appHandler, name='appHandler'),
url(r'^passToLogin', views.loginByPassword, name='passToLogin'),
url(r'^signToLogin', views.loginBySignature, name='signToLogin'),
url(r'^authUserHandler', views.authUserHandler, name='authUserHandler'),
)
| odeke-em/restAssured | auth/urls.py | Python | mit | 477 |
import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
    session = Session()
    try:
        yield session
    finally:
        # release the connection even if the test fails; a bare `except: pass`
        # here would silently swallow test errors raised at the yield point
        session.close()
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
| uber-common/opentracing-python-instrumentation | tests/opentracing_instrumentation/test_mysqldb.py | Python | mit | 2,279 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Participant'
db.delete_table(u'pa_participant')
# Removing M2M table for field user on 'Participant'
db.delete_table('pa_participant_user')
# Adding M2M table for field user on 'ReportingPeriod'
db.create_table(u'pa_reportingperiod_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])
def backwards(self, orm):
# Adding model 'Participant'
db.create_table(u'pa_participant', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
))
db.send_create_signal(u'pa', ['Participant'])
# Adding M2M table for field user on 'Participant'
db.create_table(u'pa_participant_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])
# Removing M2M table for field user on 'ReportingPeriod'
db.delete_table('pa_reportingperiod_user')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pa.activity': {
'Meta': {'object_name': 'Activity'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pa.activityentry': {
'Meta': {'object_name': 'ActivityEntry'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
},
u'pa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
},
u'pa.profession': {
'Meta': {'object_name': 'Profession'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'pa.reportingperiod': {
'Meta': {'object_name': 'ReportingPeriod'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
},
u'pa.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pa'] | Mathew/psychoanalysis | psychoanalysis/apps/pa/migrations/0002_auto__del_participant.py | Python | mit | 7,476 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2015 from http://adventofcode.com/2015/day/5
Author: James Walker
Copyrighted 2017 under the MIT license:
http://www.opensource.org/licenses/mit-license.php
Execution:
python advent_of_code_2015_day_05.py
--- Day 5: Doesn't He Have Intern-Elves For This? ---
Santa needs help figuring out which strings in his text file are naughty or
nice.
A nice string is one with all of the following properties:
It contains at least three vowels (aeiou only), like aei, xazegov, or
aeiouaeiouaeiou.
It contains at least one letter that appears twice in a row, like xx,
abcdde (dd), or aabbccdd (aa, bb, cc, or dd).
It does not contain the strings ab, cd, pq, or xy, even if they are part of
one of the other requirements.
For example:
ugknbfddgicrmopn is nice because it has at least three vowels
(u...i...o...), a double letter (...dd...), and none of the disallowed
substrings.
aaa is nice because it has at least three vowels and a double letter, even
though the letters used by different rules overlap.
jchzalrnumimnmhp is naughty because it has no double letter.
haegwjzuvuyypxyu is naughty because it contains the string xy.
dvszwmarrgswjxmb is naughty because it contains only one vowel.
How many strings are nice?
Answer: 258
--- Day 5: Part Two ---
Realizing the error of his ways, Santa has switched to a better model of
determining whether a string is naughty or nice. None of the old rules apply,
as they are all clearly ridiculous. Now, a nice string is one with all of the
following properties:
It contains a pair of any two letters that appears at least twice in the
string without overlapping, like xyxy (xy) or aabcdefgaa (aa), but not
like aaa (aa, but it overlaps).
It contains at least one letter which repeats with exactly one letter
between them, like xyx, abcdefeghi (efe), or even aaa.
For example:
qjhvhtzxzqqjkmpb is nice because is has a pair that appears twice (qj) and
a letter that repeats with exactly one letter between them (zxz).
xxyxx is nice because it has a pair that appears twice and a letter that
repeats with one between, even though the letters used by each rule
overlap.
uurcxstgmygtbstg is naughty because it has a pair (tg) but no repeat with a
single letter between them.
ieodomkazucvgmuy is naughty because it has a repeating letter with one
between (odo), but no pair that appears twice.
How many strings are nice under these new rules?
Answer: 53
"""
import collections
import os
import re
import sys
TestCase = collections.namedtuple('TestCase', 'input expected1 expected2')
class Advent_Of_Code_2015_Solver_Day05(object):
"""Advent of Code 2015 Day 5: Doesn't He Have Intern-Elves For This?"""
def __init__(self, file_name=None):
self._file_name = file_name
self._puzzle_input = None
self._solved_output = (
"The text file had {0} nice strings using the original rules\n"
"and it had {1} nice strings using the new rules."
)
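        # Pre-compiled rules from the puzzle statement:
        #   vowels       - aeiou characters (a nice string needs at least three)
        #   double_char  - a letter appearing twice in a row, e.g. 'dd' in 'abcdde'
        #   naughty      - any forbidden substring: ab, cd, pq or xy
        #   double_pair  - a two-letter pair appearing twice, e.g. 'xyxy'
        #   triplet      - a letter repeating with one letter between, e.g. 'efe'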
        self.__regex_vowels = re.compile(r'[aeiou]')
        self.__regex_double_char = re.compile(r'(\w)\1+')
        self.__regex_naughty = re.compile(r'ab|cd|pq|xy')
        self.__regex_double_pair = re.compile(r'(\w{2})\w*\1')
        self.__regex_triplet = re.compile(r'(\w)\w\1')
def _load_puzzle_file(self):
filePath = "{dir}/{f}".format(dir=os.getcwd(), f=self._file_name)
try:
with open(filePath, mode='r') as puzzle_file:
self._puzzle_input = puzzle_file.readlines()
except IOError as err:
errorMsg = (
"ERROR: Failed to read the puzzle input from file '{file}'\n"
"{error}"
)
print(errorMsg.format(file=self._file_name, error=err))
exit(1)
def __is_nice_string_using_old_rules(self, string):
return (self.__regex_naughty.search(string) is None
and len(self.__regex_vowels.findall(string)) > 2
and self.__regex_double_char.search(string))
def __is_nice_string_using_new_rules(self, string):
return (self.__regex_double_pair.search(string)
and self.__regex_triplet.search(string))
def _solve_puzzle_parts(self):
old_nice_count = 0
new_nice_count = 0
for string in self._puzzle_input:
if not string:
continue
if self.__is_nice_string_using_old_rules(string):
old_nice_count += 1
if self.__is_nice_string_using_new_rules(string):
new_nice_count += 1
return (old_nice_count, new_nice_count)
def get_puzzle_solution(self, alt_input=None):
if alt_input is None:
self._load_puzzle_file()
else:
self._puzzle_input = alt_input
old_nice_count, new_nice_count = self._solve_puzzle_parts()
return self._solved_output.format(old_nice_count, new_nice_count)
def _run_test_case(self, test_case):
correct_output = self._solved_output.format(
test_case.expected1,
test_case.expected2
)
test_output = self.get_puzzle_solution(test_case.input)
if correct_output == test_output:
print("Test passed for input '{0}'".format(test_case.input))
else:
print("Test failed for input '{0}'".format(test_case.input))
print(test_output)
def run_test_cases(self):
print("No Puzzle Input for {puzzle}".format(puzzle=self.__doc__))
print("Running Test Cases...")
self._run_test_case(TestCase(['ugknbfddgicrmopn'], 1, 0))
self._run_test_case(TestCase(['aaa'], 1, 0))
self._run_test_case(TestCase(['jchzalrnumimnmhp'], 0, 0))
self._run_test_case(TestCase(['haegwjzuvuyypxyu'], 0, 0))
self._run_test_case(TestCase(['dvszwmarrgswjxmb'], 0, 0))
self._run_test_case(TestCase(['xyxy'], 0, 1))
self._run_test_case(TestCase(['aabcdefgaa'], 0, 0))
self._run_test_case(TestCase(['qjhvhtzxzqqjkmpb'], 0, 1))
self._run_test_case(TestCase(['xxyxx'], 0, 1))
self._run_test_case(TestCase(['uurcxstgmygtbstg'], 0, 0))
self._run_test_case(TestCase(['ieodomkazucvgmuy'], 0, 0))
self._run_test_case(TestCase(['aaccacc'], 1, 1))
if __name__ == '__main__':
try:
day05_solver = Advent_Of_Code_2015_Solver_Day05(sys.argv[1])
print(day05_solver.__doc__)
print(day05_solver.get_puzzle_solution())
except IndexError:
Advent_Of_Code_2015_Solver_Day05().run_test_cases()
| JDSWalker/AdventOfCode | 2015/Day05/advent_of_code_2015_day_05.py | Python | mit | 6,750 |
#!/bin/env/python
# coding: utf-8
import logging
import os
import time
import uuid
from logging import Formatter
from logging.handlers import RotatingFileHandler
from multiprocessing import Queue
from time import strftime
import dill
from .commands import *
from .processing import MultiprocessingLogger
class TaskProgress(object):
"""
Holds both data and graphics-related information for a task's progress bar.
The logger will iterate over TaskProgress objects to draw progress bars on screen.
"""
def __init__(self,
total,
prefix='',
suffix='',
decimals=0,
bar_length=60,
keep_alive=False,
display_time=False):
"""
Creates a new progress bar using the given information.
:param total: The total number of iteration for this progress bar.
:param prefix: [Optional] The text that should be displayed at the left side of the
progress bar. Note that progress bars will always stay left-aligned at the
shortest possible.
:param suffix: [Optional] The text that should be displayed at the very right side of the
progress bar.
:param decimals: [Optional] The number of decimals to display for the percentage.
:param bar_length: [Optional] The graphical bar size displayed on screen. Unit is character.
:param keep_alive: [Optional] Specify whether the progress bar should stay displayed forever
once completed or if it should vanish.
:param display_time: [Optional] Specify whether the duration since the progress has begun should
be displayed. Running time will be displayed between parenthesis, whereas it
will be displayed between brackets when the progress has completed.
"""
super(TaskProgress, self).__init__()
self.progress = 0
# Minimum number of seconds at maximum completion before a progress bar is removed from display
# The progress bar may vanish at a further time as the redraw rate depends upon chrono AND method calls
self.timeout_chrono = None
self.begin_time = None
self.end_time = None
self.elapsed_time_at_end = None
# Graphics related information
self.keep_alive = keep_alive
self.display_time = display_time
self.total = total
self.prefix = prefix
self.suffix = suffix
self.decimals = decimals
self.bar_length = bar_length
def set_progress(self, progress):
"""
Defines the current progress for this progress bar in iteration units (not percent).
:param progress: Current progress in iteration units regarding its total (not percent).
:return: True if the progress has changed. If the given progress is higher than the total or lower
than 0 then it will be ignored.
"""
_progress = progress
if _progress > self.total:
_progress = self.total
elif _progress < 0:
_progress = 0
# Stop task chrono if needed
if _progress == self.total and self.display_time:
self.end_time = time.time() * 1000
# If the task has completed instantly then define its begin_time too
if not self.begin_time:
self.begin_time = self.end_time
has_changed = self.progress != _progress
if has_changed:
self.progress = _progress
return has_changed
class FancyLogger(object):
"""
Defines a multiprocess logger object. Logger uses a redraw rate because of console flickering. That means it will
not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the
right time. Logger will redraw at a given time period AND when new messages or progress are logged.
If you still want to force redraw immediately (may produce flickering) then call 'flush' method.
Logger uses one file handler and then uses standard output (stdout) to draw on screen.
"""
queue = None
"Handles all messages and progress to be sent to the logger process."
default_message_number = 20
"Default value for the logger configuration."
default_exception_number = 5
"Default value for the logger configuration."
default_permanent_progressbar_slots = 0
"Default value for the logger configuration."
default_redraw_frequency_millis = 500
"Default value for the logger configuration."
default_level = logging.INFO
"Default value for the logger configuration."
default_task_millis_to_removal = 500
"Default value for the logger configuration."
default_console_format_strftime = '%d %B %Y %H:%M:%S'
"Default value for the logger configuration."
default_console_format = '{T} [{L}]'
"Default value for the logger configuration."
default_file_handlers = []
"Default value for the logger configuration. Filled in constructor."
def __init__(self,
message_number=default_message_number,
exception_number=default_exception_number,
permanent_progressbar_slots=default_permanent_progressbar_slots,
redraw_frequency_millis=default_redraw_frequency_millis,
console_level=default_level,
task_millis_to_removal=default_task_millis_to_removal,
console_format_strftime=default_console_format_strftime,
console_format=default_console_format,
file_handlers=None,
application_name=None):
"""
Initializes a new logger and starts its process immediately using given configuration.
:param message_number: [Optional] Number of simultaneously displayed messages below progress bars.
:param exception_number: [Optional] Number of simultaneously displayed exceptions below messages.
:param permanent_progressbar_slots: [Optional] The amount of vertical space (bar slots) to keep at all times,
so the message logger will not move anymore if the bar number is equal or
lower than this parameter.
:param redraw_frequency_millis: [Optional] Minimum time lapse in milliseconds between two redraws. It may be
more because the redraw rate depends upon time AND method calls.
:param console_level: [Optional] The logging level (from standard logging module).
:param task_millis_to_removal: [Optional] Minimum time lapse in milliseconds at maximum completion before
a progress bar is removed from display. The progress bar may vanish at a
further time as the redraw rate depends upon time AND method calls.
:param console_format_strftime: [Optional] Specify the time format for console log lines using python
strftime format. Defaults to format: '29 november 2016 21:52:12'.
:param console_format: [Optional] Specify the format of the console log lines. There are two
variables available: {T} for timestamp, {L} for level. Will then add some
tabulations in order to align text beginning for all levels.
Defaults to format: '{T} [{L}]'
Which will produce: '29 november 2016 21:52:12 [INFO] my log text'
'29 november 2016 21:52:13 [WARNING] my log text'
'29 november 2016 21:52:14 [DEBUG] my log text'
:param file_handlers: [Optional] Specify the file handlers to use. Each file handler will use its
own regular formatter and level. Console logging is distinct from file
logging. Console logging uses custom stdout formatting, while file logging
uses regular python logging rules. All handlers are permitted except
StreamHandler if used with stdout or stderr which are reserved by this
library for custom console output.
:param application_name: [Optional] Used only if 'file_handlers' parameter is ignored. Specifies the
application name to use to format the default file logger using format:
application_%Y-%m-%d_%H-%M-%S.log
"""
super(FancyLogger, self).__init__()
# Define default file handlers
if not file_handlers:
if not application_name:
app_name = 'application'
else:
app_name = application_name
handler = RotatingFileHandler(filename=os.path.join(os.getcwd(), '{}_{}.log'
.format(app_name, strftime('%Y-%m-%d_%H-%M-%S'))),
encoding='utf8',
maxBytes=5242880, # 5 MB
backupCount=10,
delay=True)
handler.setLevel(logging.INFO)
handler.setFormatter(fmt=Formatter(fmt='%(asctime)s [%(levelname)s]\t%(message)s',
datefmt=self.default_console_format_strftime))
self.default_file_handlers.append(handler)
file_handlers = self.default_file_handlers
if not self.queue:
self.queue = Queue()
self.process = MultiprocessingLogger(queue=self.queue,
console_level=console_level,
message_number=message_number,
exception_number=exception_number,
permanent_progressbar_slots=permanent_progressbar_slots,
redraw_frequency_millis=redraw_frequency_millis,
task_millis_to_removal=task_millis_to_removal,
console_format_strftime=console_format_strftime,
console_format=console_format,
file_handlers=file_handlers)
self.process.start()
def flush(self):
"""
Flushes the remaining messages and progress bars state by forcing redraw. Can be useful if you want to be sure
that a message or progress has been updated in display at a given moment in code, like when you are exiting an
application or doing some kind of synchronized operations.
"""
self.queue.put(dill.dumps(FlushCommand()))
def terminate(self):
"""
Tells the logger process to exit immediately. If you do not call 'flush' method before, you may lose some
messages of progresses that have not been displayed yet. This method blocks until logger process has stopped.
"""
self.queue.put(dill.dumps(ExitCommand()))
if self.process:
self.process.join()
def set_configuration(self,
message_number=default_message_number,
exception_number=default_exception_number,
permanent_progressbar_slots=default_permanent_progressbar_slots,
redraw_frequency_millis=default_redraw_frequency_millis,
console_level=default_level,
task_millis_to_removal=default_task_millis_to_removal,
console_format_strftime=default_console_format_strftime,
console_format=default_console_format,
file_handlers=default_file_handlers):
"""
Defines the current configuration of the logger. Can be used at any moment during runtime to modify the logger
behavior.
:param message_number: [Optional] Number of simultaneously displayed messages below progress bars.
:param exception_number: [Optional] Number of simultaneously displayed exceptions below messages.
:param permanent_progressbar_slots: [Optional] The amount of vertical space (bar slots) to keep at all times,
so the message logger will not move anymore if the bar number is equal or
lower than this parameter.
:param redraw_frequency_millis: [Optional] Minimum time lapse in milliseconds between two redraws. It may be
more because the redraw rate depends upon time AND method calls.
:param console_level: [Optional] The logging level (from standard logging module).
:param task_millis_to_removal: [Optional] Minimum time lapse in milliseconds at maximum completion before
a progress bar is removed from display. The progress bar may vanish at a
further time as the redraw rate depends upon time AND method calls.
:param console_format_strftime: [Optional] Specify the time format for console log lines using python
strftime format. Defaults to format: '29 november 2016 21:52:12'.
:param console_format: [Optional] Specify the format of the console log lines. There are two
variables available: {T} for timestamp, {L} for level. Will then add some
tabulations in order to align text beginning for all levels.
Defaults to format: '{T} [{L}]'
Which will produce: '29 november 2016 21:52:12 [INFO] my log text'
'29 november 2016 21:52:13 [WARNING] my log text'
'29 november 2016 21:52:14 [DEBUG] my log text'
:param file_handlers: [Optional] Specify the file handlers to use. Each file handler will use its
own regular formatter and level. Console logging is distinct from file
logging. Console logging uses custom stdout formatting, while file logging
uses regular python logging rules. All handlers are permitted except
StreamHandler if used with stdout or stderr which are reserved by this
library for custom console output.
"""
self.queue.put(dill.dumps(SetConfigurationCommand(task_millis_to_removal=task_millis_to_removal,
console_level=console_level,
permanent_progressbar_slots=permanent_progressbar_slots,
message_number=message_number,
exception_number=exception_number,
redraw_frequency_millis=redraw_frequency_millis,
console_format_strftime=console_format_strftime,
console_format=console_format,
file_handlers=file_handlers)))
def set_level(self,
level,
console_only=False):
"""
Defines the logging level (from standard logging module) for log messages.
:param level: Level of logging for the file logger.
:param console_only: [Optional] If True then the file logger will not be affected.
"""
self.queue.put(dill.dumps(SetLevelCommand(level=level,
console_only=console_only)))
def set_task_object(self,
task_id,
task_progress_object):
"""
Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information.
"""
self.set_task(task_id=task_id,
total=task_progress_object.total,
prefix=task_progress_object.prefix,
suffix=task_progress_object.suffix,
decimals=task_progress_object.decimals,
bar_length=task_progress_object.bar_length,
keep_alive=task_progress_object.keep_alive,
display_time=task_progress_object.display_time)
def set_task(self,
task_id,
total,
prefix,
suffix='',
decimals=0,
bar_length=60,
keep_alive=False,
display_time=False):
"""
Defines a new progress bar with the given information.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param total: The total number of iteration for this progress bar.
        :param prefix:           The text that should be displayed at the left side of the progress bar. Note that
                                 progress bars always stay left-aligned and are kept as short as possible.
:param suffix: [Optional] The text that should be displayed at the very right side of the progress bar.
:param decimals: [Optional] The number of decimals to display for the percentage.
:param bar_length: [Optional] The graphical bar size displayed on screen. Unit is character.
:param keep_alive: [Optional] Specify whether the progress bar should stay displayed forever once completed
or if it should vanish.
        :param display_time:     [Optional] Specify whether the duration since the progress began should be
                                 displayed. Running time is displayed between parentheses while in progress,
                                 and between brackets once the progress has completed.
"""
self.queue.put(dill.dumps(NewTaskCommand(task_id=task_id,
task=TaskProgress(total,
prefix,
suffix,
decimals,
bar_length,
keep_alive,
display_time))))
def update(self,
task_id,
progress):
"""
Defines the current progress for this progress bar id in iteration units (not percent).
If the given id does not exist or the given progress is identical to the current, then does nothing.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
        :param task_id:     Unique identifier of the progress bar to update.
:param progress: Current progress in iteration units regarding its total (not percent).
"""
self.queue.put(dill.dumps(UpdateProgressCommand(task_id=task_id,
progress=progress)))
def debug(self, text):
"""
Posts a debug message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.DEBUG)))
def info(self, text):
"""
Posts an info message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.INFO)))
def warning(self, text):
"""
Posts a warning message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.WARNING)))
def error(self, text):
"""
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.ERROR)))
def critical(self, text):
"""
Posts a critical message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.CRITICAL)))
def throw(self, stacktrace, process_title=None):
"""
Sends an exception to the logger so it can display it as a special message. Prevents console refresh cycles from
hiding exceptions that could be thrown by processes.
:param stacktrace: Stacktrace string as returned by 'traceback.format_exc()' in an 'except' block.
:param process_title: [Optional] Define the current process title to display into the logger for this
exception.
"""
self.queue.put(dill.dumps(StacktraceCommand(pid=os.getpid(),
stacktrace=stacktrace,
process_title=process_title)))
# --------------------------------------------------------------------
# Iterator implementation
def progress(self,
enumerable,
task_progress_object=None):
"""
Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
:param enumerable: Collection to iterate over.
:param task_progress_object: [Optional] TaskProgress object holding the progress bar information.
:return: The logger instance.
"""
self.list = enumerable
self.list_length = len(enumerable)
self.task_id = uuid.uuid4()
self.index = 0
if task_progress_object:
# Force total attribute
task_progress_object.total = self.list_length
else:
task_progress_object = TaskProgress(total=self.list_length,
display_time=True,
prefix='Progress')
# Create a task progress
self.set_task_object(task_id=self.task_id,
task_progress_object=task_progress_object)
return self
def __iter__(self):
"""
Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
:return: The logger instance.
"""
return self
def __next__(self):
"""
Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
:return: The current object of the iterator.
"""
if self.index >= self.list_length:
raise StopIteration
else:
self.index += 1
self.update(task_id=self.task_id,
progress=self.index)
return self.list[self.index - 1]
# ---------------------------------------------------------------------
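    # A minimal usage sketch of the iterator interface above (assumes a FancyLogger
    # instance named 'logger' has been constructed and started elsewhere, and that
    # 'work_items' is a hypothetical list of things to process):
    #
    #     for item in logger.progress(work_items):
    #         handle(item)    # each iteration sends an UpdateProgressCommand
    #
    # The TaskProgress total is forced to len(work_items), so the bar completes
    # exactly when the loop ends.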
| peepall/FancyLogger | FancyLogger/__init__.py | Python | mit | 27,844 |
from itertools import combinations
import numpy as np
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
def findForce(system, forcetype, add=True):
""" Finds a specific force in the system force list - added if not found."""
for force in system.getForces():
if isinstance(force, forcetype):
return force
if add==True:
system.addForce(forcetype())
return findForce(system, forcetype)
return None
def setGlobalForceParameter(force, key, value):
for i in range(force.getNumGlobalParameters()):
if force.getGlobalParameterName(i)==key:
print('setting force parameter', key, '=', value)
            force.setGlobalParameterDefaultValue(i, value)
def atomIndexInResidue(residue):
""" list of atom index in residue """
index=[]
for a in list(residue.atoms()):
index.append(a.index)
return index
def getResiduePositions(residue, positions):
""" Returns array w. atomic positions of residue """
ndx = atomIndexInResidue(residue)
return np.array(positions)[ndx]
def uniquePairs(index):
""" list of unique, internal pairs """
return list(combinations( range(index[0],index[-1]+1),2 ) )
def addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k):
""" add harmonic bonds between pairs if distance is smaller than threshold """
print('Constraint force constant =', k)
for i,j in pairlist:
distance = unit.norm( positions[i]-positions[j] )
if distance<threshold:
harmonicforce.addBond( i,j,
distance.value_in_unit(unit.nanometer),
k.value_in_unit( unit.kilojoule/unit.nanometer**2/unit.mole ))
print("added harmonic bond between", i, j, 'with distance',distance)
def addExclusions(nonbondedforce, pairlist):
""" add nonbonded exclusions between pairs """
for i,j in pairlist:
nonbondedforce.addExclusion(i,j)
def rigidifyResidue(residue, harmonicforce, positions, nonbondedforce=None,
threshold=6.0*unit.angstrom, k=2500*unit.kilojoule/unit.nanometer**2/unit.mole):
""" make residue rigid by adding constraints and nonbonded exclusions """
index = atomIndexInResidue(residue)
pairlist = uniquePairs(index)
    addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k)
    if nonbondedforce is not None:
        for i,j in pairlist:
            print('added nonbonded exclusion between', i, j)
            nonbondedforce.addExclusion(i,j)
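# A usage sketch under assumed names (a real script needs an actual PDB file;
# 'pdb' and 'system' below are illustrative, not defined in this module):
#
#     pdb = app.PDBFile('input.pdb')
#     forcefield = app.ForceField('amber99sb.xml')
#     system = forcefield.createSystem(pdb.topology)
#     harmonic = findForce(system, mm.HarmonicBondForce)
#     nonbonded = findForce(system, mm.NonbondedForce, add=False)
#     for residue in pdb.topology.residues():
#         rigidifyResidue(residue, harmonic, pdb.positions, nonbondedforce=nonbonded)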
| mlund/pyha | pyha/openmm.py | Python | mit | 2,333 |
from array import array
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import classification_report, roc_auc_score, roc_curve
from sklearn import tree
import cPickle
data = np.load('/Users/musthero/Documents/Yura/Applications/tmva_local/output_electrons_fullsim_v5_VeryTightLH_20per.npz')
# Train on the first 2000, test on the rest
X_train, y_train = data['data_training'], data['isprompt_training'].ravel()
X_test, y_test = data['data_testing'][0:1000], data['isprompt_testing'][0:1000].ravel()
# sklearn
dt = DecisionTreeClassifier(max_depth=3,
min_samples_leaf=100)
#min_samples_leaf=0.05*len(X_train))
doFit = False
if doFit:
print "Performing DecisionTree fit..."
dt.fit(X_train, y_train)
    with open('electrons_toTMVA.pkl', 'wb') as fid:
        cPickle.dump(dt, fid)
else:
print "Loading DecisionTree..."
# load it again
with open('electrons_toTMVA.pkl', 'rb') as fid:
dt = cPickle.load(fid)
#sk_y_predicted = dt.predict(X_test)
#sk_y_predicted = dt.predict_proba(X_test)[:, 1]
sk_y_predicted = dt.predict_proba(X_test)[:, 1]
predictions = dt.predict(X_test)
print predictions
print y_test
# Draw ROC curve
fpr, tpr, _ = roc_curve(y_test, sk_y_predicted)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve of class')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.savefig("output_fullsim_v5_electrons_roc_20per_DecisionTree.png", dpi=144)
tree.export_graphviz(dt, out_file='dt_viz.dot')
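# roc_auc_score (imported above but otherwise unused) gives a single-number
# summary of the ROC curve plotted above:
print "Test AUC: %.4f" % roc_auc_score(y_test, sk_y_predicted)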
# Save to file fpr, tpr
#np.savez('output_fullsim_v3_electrons_fpr_tpr_10per.npz',
# fpr=fpr, tpr=tpr) | yuraic/koza4ok | skTMVA/sci_bdt_electron_DecisionTree.py | Python | mit | 1,980 |
# coding=utf-8
from setuptools import setup
from Cython.Build import cythonize
setup(
name="cyfib",
ext_modules=cythonize('cyfib.pyx', compiler_directives={'embedsignature': True}),
)
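# Typical build-and-use cycle for this setup script:
#
#     python setup.py build_ext --inplace
#     python -c "import cyfib; help(cyfib)"
#
# The embedsignature directive makes the compiled functions expose their call
# signatures, which is what makes the help() output above useful.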
| tleonhardt/Python_Interface_Cpp | cython/wrap_c/setup.py | Python | mit | 193 |
import random, math
import gimp_be
#from gimp_be.utils.quick import qL
from gimp_be.image.layer import editLayerMask
from effects import mirror
import numpy as np
import UndrawnTurtle as turtle
def brushSize(size=-1):
""""
Set brush size
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if size < 1:
size = random.randrange(2, ((image.height + image.width) / 8))
gimp_be.pdb.gimp_context_set_brush_size(size)
# Set brush opacity
def brushOpacity(op=-1):
if op == -1:
op = random.randrange(15, 100)
gimp_be.pdb.gimp_brushes_set_opacity(op)
return op
# Set random brush color no parameters set random
def brushColor(r1=-1, g1=-1, b1=-1, r2=-1, g2=-1, b2=-1):
if not r1 == -1:
gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
if not r2 == -1:
gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
elif r1 == -1:
r1 = random.randrange(0, 255)
g1 = random.randrange(0, 255)
b1 = random.randrange(0, 255)
r2 = random.randrange(0, 255)
g2 = random.randrange(0, 255)
b2 = random.randrange(0, 255)
gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
return (r1, g1, b1, r2, g2, b2)
#set gray scale color
def grayColor(gray_color):
gimp_be.pdb.gimp_context_set_foreground((gray_color, gray_color, gray_color))
# Set random brush
def randomBrush():
num_brushes, brush_list = gimp_be.pdb.gimp_brushes_get_list('')
brush_pick = brush_list[random.randrange(0, len(brush_list))]
gimp_be.pdb.gimp_brushes_set_brush(brush_pick)
return brush_pick
# Set random brush dynamics
def randomDynamics():
dynamics_pick = random.choice(gimp_be.pdb.gimp_dynamics_get_list('')[1])
gimp_be.pdb.gimp_context_set_dynamics(dynamics_pick)
return dynamics_pick
def qL():
# quick new layer
gimp_be.addNewLayer()
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_edit_fill(drawable, 1)
def drawLine(points):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(points), points)
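# All drawing helpers below pass coordinates as a flat list alternating x and y;
# e.g. a single segment from (0, 0) to (100, 50) on the active drawable:
#
#     drawLine([0, 0, 100, 50])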
def drawSpiral(n=140, angle=61, step=10, center=[]):
coord=[]
nt=turtle.Turtle()
if center == []:
image = gimp_be.gimp.image_list()[0]
center=[image.width/2,image.height/2]
for step in range(n):
coord.append(int(nt.position()[0]*10)+center[0])
coord.append(int(nt.position()[1]*10)+center[1])
nt.forward(step)
nt.left(angle)
coord.append(int(nt.position()[0]*10)+center[0])
coord.append(int(nt.position()[1]*10)+center[1])
drawLine(coord)
def drawRays(rays=32, rayLength=100, centerX=0, centerY=0):
""""
draw N rays from center in active drawable with current brush
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if centerX == 0:
centerX = image.width/2
if centerY == 0:
centerY = image.height/2
ray_gap = int(360.0/rays)
for ray in range(0,rays):
ctrlPoints = centerX, centerY, centerX + rayLength * math.sin(math.radians(ray*ray_gap)), centerY + rayLength * math.cos(math.radians(ray*ray_gap))
drawLine(ctrlPoints)
def drawRandomRays(rays=32, length=100, centerX=0, centerY=0,noise=0.3):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if centerX == 0:
centerX = image.width/2
if centerY == 0:
centerY = image.height/2
ray_gap = 360.0/rays
for ray in range(0,rays):
rayLength=random.choice(range(int(length-length*noise),int(length+length*noise)))
random_angle=random.choice(np.arange(0.0,360.0,0.01))
ctrlPoints = [ centerX, centerY, centerX + int(rayLength * math.sin(math.radians(random_angle))), int(centerY + rayLength * math.cos(math.radians(random_angle)))]
drawLine(ctrlPoints)
def spikeBallStack(depth=20, layer_mode=6, flatten=0):
for x in range(1,depth):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
qL()
gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(image), layer_mode)
drawRandomRays(rays=random.choice([32,64,128,4]), length=(image.height/2-image.height/12), centerX=image.width/2, centerY=image.height/2,noise=random.choice([0.3,0.1,0.8]))
if flatten:
if not x%flatten:
gimp_be.pdb.gimp_image_flatten(image)
def randomStrokes(num = 4, opt = 1):
"""
Draw random strokes of random size and random position
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    r = random.randrange
    for loopNum in range(0, num):
        if opt == 1:
            brushSize(35)
        # random start and end points for each stroke
        ctrlPoints = (r(0, image.width), r(0, image.height),
                      r(0, image.width), r(0, image.height))
        drawLine(ctrlPoints)
# draw random color bars, opt 3 uses random blend
def drawBars(barNum=10, opt=3):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
barWidth =image.width/ barNum
barLeft = 0
color = -1
for loopNum in range(0, barNum):
gimp_be.pdb.gimp_image_select_rectangle(image, 2, barLeft, 0, barWidth, image.height)
barLeft = barLeft + barWidth
if opt == 3:
randomBlend()
elif opt == 2:
color = brushColor()
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
return (barNum, opt, color)
# draw carbon nano tube
def drawCNT():
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
drawSinWave(1, 4, image.height * .42, 0, image.height / 2)
gimp_be.pdb.gimp_paintbrush(drawable, 0, 4, (0, (image.height - 80),image.width, (image.height - 80)), 0, 0)
gimp_be.pdb.gimp_paintbrush(drawable, 0, 4, (0, 80,image.width, 80), 0, 0)
# draw sine wave
def drawSinWave(bar_space=32, bar_length=-1, mag=70, x_offset=-1, y_offset=-1):
image = gimp_be.gimp.image_list()[0]
if y_offset == -1:
y_offset = image.height/2
if x_offset == -1:
x_offset = 0
if bar_length == -1:
bar_length = image.height/6
steps = image.width / bar_space
x = 0
for cStep in range(0, steps):
x = cStep * bar_space + x_offset
y = int(round(math.sin(x) * mag) + y_offset)
ctrlPoints = x, int(y - round(bar_length / 2)), x, int(y + round(bar_length / 2))
drawLine(ctrlPoints)
# draw sine wave
def drawSinWaveDouble(barSpace, barLen, mag):
image = gimp_be.gimp.image_list()[0]
steps =image.width/ barSpace
x = 0
for cStep in range(1, steps):
x = cStep * barSpace
y = int(abs(round(math.sin(x) * mag + image.height / 2)))
ctrlPoints = x, int(y - round(barLen / 2)), x, int(y + round(barLen / 2))
drawLine(ctrlPoints)
# draw a single brush point
def drawBrush(x1, y1):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
ctrlPoints = (x1, y1, x1, y1)
drawLine(ctrlPoints)
# draw multiple brush points
def drawMultiBrush(brush_strokes=24):
image = gimp_be.gimp.image_list()[0]
grid_width=image.width/int(math.sqrt(brush_strokes))
grid_height=image.height/int(math.sqrt(brush_strokes))
coord_x=0
coord_y = 0
for i in range(0, int(math.sqrt(brush_strokes))):
coord_x = coord_x + grid_width
for x in range(0, int(math.sqrt(brush_strokes))):
coord_y = coord_y + grid_height
drawBrush(coord_x, coord_y)
coord_y = 0
# draw grid of dots for remainder mapping; incomplete and temporary (TODO)
def dotGrid():
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
for i in range(10,image.width-10,20):
for x in range(10, image.height-10,20):
grayColor(abs(i^3-x^3)%256)
drawBrush(i+10,x+10)
# draws random dots, opt does random color
def randomCircleFill(num=20, size=100, opt=3, sq=1):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
for loopNum in range(0, num):
cirPar = [random.randrange(0,image.width), random.randrange(0, image.height), random.randrange(10, size),
random.randrange(10, size)]
if opt % 2 == 0:
brushColor()
if sq:
gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[2], 2, 1, 0, 0)
else:
gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[3], 2, 1, 0, 0)
        if opt % 3 == 0:
randomBlend()
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
def randomRectFill(num=20, size=100, opt=3, sq=0):
# draws square, opt does random color
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
selectMode = 2
if opt % 5 == 0:
selectMode = 0
for loopNum in range(0, num):
if opt % 2 == 0:
brushColor()
rectPar = [random.randrange(0,image.width), random.randrange(0, image.height), random.randrange(10, size),
random.randrange(10, size)]
        if sq:
            gimp_be.pdb.gimp_image_select_rectangle(image, selectMode, rectPar[0], rectPar[1], rectPar[2], rectPar[2])
        else:
            gimp_be.pdb.gimp_image_select_rectangle(image, selectMode, rectPar[0], rectPar[1], rectPar[2], rectPar[3])
if opt % 3 == 0:
randomBlend()
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
def randomBlend():
# Random Blend tool test
blend_mode = 0
paint_mode = 0
gradient_type = random.randrange(0, 10)
opacity = random.randrange(20, 100)
offset = 0
repeat = random.randrange(0, 2)
reverse = 0
supersample = 0
max_depth = random.randrange(1, 9)
    threshold = 0
dither = 0
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
brushColor()
x1 = random.randrange(0,image.width)
y1 = random.randrange(0, image.height)
x2 = random.randrange(0,image.width)
y2 = random.randrange(0, image.height)
gimp_be.pdb.gimp_blend(drawable, blend_mode, paint_mode, gradient_type, opacity, offset, repeat, reverse, supersample, max_depth, threshold, dither, x1, y1, x2, y2)
def randomPoints(num=12, border=0):
    # return a flat list of num random (x, y) pairs, kept 'border' px from the edges
    image = gimp_be.gimp.image_list()[0]
    d = []
    for x in range(num):
        d.append(random.randrange(border, image.width - border))
        d.append(random.randrange(border, image.height - border))
    return d
def drawInkBlot(option=''):
image=gimp_be.gimp.image_list()[0]
layer=gimp_be.pdb.gimp_image_get_active_layer(image)
if 'trippy' in option:
layer_copy = gimp_be.pdb.gimp_layer_copy(layer, 0)
gimp_be.pdb.gimp_image_add_layer(image, layer_copy,1)
randomBlend()
mask = gimp_be.pdb.gimp_layer_create_mask(layer,5)
gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
editLayerMask(1)
randomCircleFill(num=15,size=800)
brushColor(255,255,255)
randomCircleFill(num=50,size=100)
randomCircleFill(num=5,size=300)
    brushColor(0, 0, 0)
randomCircleFill(num=20,size=600)
randomCircleFill(num=50,size=400)
randomCircleFill(num=100,size=100)
brushColor(255,255,255)
randomCircleFill(num=50,size=100)
    brushColor(0, 0, 0)
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
brushSize()
strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
brushSize()
strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
mirror('h')
if 'trippy' in option and random.choice([0,1]):
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_invert(drawable)
editLayerMask(0)
def inkBlotStack(depth=16,layer_mode=6, flatten=0):
for x in range(1,depth):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
qL()
gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(image), layer_mode)
drawInkBlot()
        if flatten:
            if not x%flatten:
                gimp_be.pdb.gimp_image_flatten(image)
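# Example sketch: a 16-layer ink-blot composite, flattening every 4th layer
# (layer_mode 6 appears to be DIFFERENCE in the legacy GIMP 2.x mode enum):
#
#     inkBlotStack(depth=16, layer_mode=6, flatten=4)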
def gridCenters(grid=[]):
if grid==[]:
grid=[4,3]
image = gimp_be.gimp.image_list()[0]
row_width = image.width/(grid[0])
columb_height = image.height/(grid[1])
tile_centers = []
for row in range(0,grid[0]):
for columb in range(0,grid[1]):
tile_centers.append([row_width*row+row_width/2,columb_height*columb+columb_height/2])
return tile_centers
def tile(grid=[],option="mibd",irregularity=0.3):
image=gimp_be.gimp.image_list()[0]
layer=gimp_be.pdb.gimp_image_get_active_layer(image)
if grid==[]:
if image.height == image.width:
grid=[4,4]
elif image.height < image.width:
grid=[3,4]
else:
grid=[4,3]
if "m" in option:
mask = gimp_be.pdb.gimp_layer_create_mask(layer,0)
gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
editLayerMask(1)
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
grid_spacing = image.width/grid[0]
tile_centers=gridCenters(grid)
if irregularity > 0.0:
i_tiles=[]
for tile in tile_centers:
tile[0]=tile[0]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
tile[1]=tile[1]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
i_tiles.append(tile)
tile_centers=i_tiles
if "b" in option:
randomBrush()
if "d" in option:
randomDynamics()
brushSize(grid_spacing)
brushColor(0,0,0)
for tile in tile_centers:
if "m" in option:
editLayerMask(1)
if irregularity == 0:
gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
        elif random.randrange(int(50.0*irregularity))+random.randrange(int(50.0*irregularity))>50.0:
randomDynamics()
else:
gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
if "g" in option:
gimp_be.pdb.plug_in_gauss(image, drawable, 20.0, 20.0, 0)
if "w" in option:
gimp_be.pdb.plug_in_whirl_pinch(image, drawable, 90, 0.0, 1.0)
if "i" in option:
gimp_be.pdb.gimp_invert(drawable)
if "m" in option:
editLayerMask(0)
def drawAkuTree(branches=6,tree_height=0, position=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if position==0:
position=[]
position.append(random.randrange(image.width))
position.append(random.randrange(4*tree_height/3, 3*image.height/4))
if tree_height == 0:
tree_height=random.randrange(position[1]/3, position[1]-position[1]/25)
print 'position:' + str(position)
#draw trunk
trunk=[position[0],position[1],position[0],position[1]-tree_height]
trunk_size=tree_height/40+3
print str(trunk)
print 'tree_height: ' + str(tree_height)
print 'trunk size: ' + str(trunk_size)
brushSize(trunk_size)
drawLine(trunk)
for node in range(branches):
node_base=[position[0],position[1]-((node*tree_height+1)/branches+tree_height/25+random.randrange(-1*tree_height/12,tree_height/12))]
base_length=tree_height/25
node_end=[]
if node%2==0:
node_end=[node_base[0]+base_length/2,node_base[1]-base_length/2]
brushSize(2*trunk_size/3)
drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
brushSize(trunk_size/3)
drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-tree_height/12-(tree_height/48)])
else:
node_end=[node_base[0]-base_length/2,node_base[1]-base_length/2]
brushSize(2*trunk_size/3)
drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
brushSize(trunk_size/3)
drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-(tree_height/12)])
def drawAkuForest(num=25):
for x in range(num):
drawAkuTree()
# draw a tree
def drawTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
if recursiondepth <= 2:
brushColor(87, 53, 12)
elif depth == 1:
brushColor(152, 90, 17)
elif depth <= 3:
brushColor(7, 145, 2)
brushSize(depth * 4 + 5)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
if depth > 0:
drawTree(x2, y2, angle - 20, depth - 1, recursiondepth + 1)
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
# draw a tree with 3 branches per node
def drawTriTree(x1=-1, y1=-1, angle=270, depth=6, recursiondepth=0, size=10):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
ctrlPoints = (x1, y1, x2, y2)
brushSize(depth + int(size/10))
brushColor()
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
drawTriTree(x2, y2, angle - 30, depth - 1, recursiondepth + 1,size)
drawTriTree(x2, y2, angle, depth - 1, recursiondepth + 1,size)
drawTriTree(x2, y2, angle + 30, depth - 1, recursiondepth + 1,size)
# draw random color tri-tree
def drawColorTriTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
brushSize(depth + 1)
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
ctrlPoints = (x1, y1, x2, y2)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        drawColorTriTree(x2, y2, angle - 20 + random.choice((-10, -5, 0, 5, 10)), depth - 1, recursiondepth + 1)
        drawColorTriTree(x2, y2, angle + random.choice((-10, -5, 0, 5, 10)), depth - 1, recursiondepth + 1)
        drawColorTriTree(x2, y2, angle + 20 + random.choice((-10, -5, 0, 5, 10)), depth - 1, recursiondepth + 1)
# draw a tree
def drawOddTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
brushSize((depth * 8 + 30))
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        if not random.randrange(0, 24) == 23:
drawTree(x2, y2, angle - 20, depth - 1, recursiondepth + 1)
if depth % 2 == 0:
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
if (depth + 1) % 4 == 0:
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
if depth == 5:
drawTree(x2, y2, angle - 45, depth - 1, recursiondepth + 1)
drawTree(x2, y2, angle + 45, depth - 1, recursiondepth + 1)
# draw a tree
def drawForestTree(x1=-1, y1=-1, angle=270, depth=7, size=10, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
brushSize(depth * depth * (int(size / ((image.height - y1)) / image.height)) + 4)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        if not random.randrange(0, 24) == 23:
            drawForestTree(x2, y2, angle - 20, depth - 1, size, recursiondepth + 1)
        if random.randrange(0, 24) == 23:
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
else:
drawForestTree(x2, y2, angle - random.randrange(15, 50), depth - 1, size, recursiondepth + 1)
if depth % 2 == 0:
drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
if (depth + 1) % 4 == 0:
drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
if depth == 5:
drawForestTree(x2, y2, angle - 45, depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle + 45, depth - 1, size, recursiondepth + 1)
# draw a series of trees with a y position based on depth
def drawForest(trees, options):
image = gimp_be.gimp.image_list()[0]
for tree in range(0, trees):
y1 = 2 * (image.height / 3) + random.randrange(-1 * (image.height / 5), image.height / 5)
x1 = random.randrange(image.width / 20, 19 * (image.width / 20))
angle = random.randrange(250, 290)
size = (y1 / (2.0 * (image.height / 3.0) + (image.height / 5.0))) + 4
depth = random.randrange(3, 7)
drawForestTree(x1, y1, angle, depth, size)
#draws polygon of N sides at a x-y location
def drawPolygon(sides=5,size=300,x_pos=0,y_pos=0, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if y_pos==0:
y_pos=image.height/2
if x_pos==0:
x_pos=image.width/2
    degree_between_points=360.0/sides
points_list=[]
for x in range(0,sides+1):
point_degree=degree_between_points*x+angle_offset
points_list.append(int(round(math.sin(math.radians(point_degree))*size))+x_pos)
points_list.append(int(round(math.cos(math.radians(point_degree))*size))+y_pos)
fade_out=0
method=0
gradient_length=0
gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)
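# Example: a hexagon with a 200 px radius centered on the canvas, rotated 30 degrees:
#
#     drawPolygon(sides=6, size=200, angle_offset=30)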
#draw a grid of polygons of N sides
def drawPolygonGrid(size=60,sides=3, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if sides%2 == 1 or sides>4:
for y in range(0-image.height/10,image.height+image.height/10, size):
x_loop=0
for x in range(0-image.width/10, image.width+image.width/10, size):
if x_loop%2==1:
drawPolygon(sides,size-size/2,x-(size/2),y,360/sides)
else:
drawPolygon(sides,size-size/2,x,y,0)
x_loop=x_loop+1
else:
for x in range(0-image.height/10,image.height+image.height/10, size):
for y in range(0-image.width/10, image.width+image.width/10, size):
drawPolygon(sides,size/3,x,y,0)
def drawFrygon(sides=5,size=300,x_pos=0,y_pos=0, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if y_pos==0:
y_pos=image.height/2
if x_pos==0:
x_pos=image.width/2
degree_between_points=360/sides
points_list=[]
for x in range(0,sides+1):
point_degree=degree_between_points*x+angle_offset
points_list.append(int(round(math.sin(point_degree)*size))+y_pos)
points_list.append(int(round(math.cos(point_degree)*size))+x_pos)
fade_out=0
method=0
gradient_length=0
gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)
def drawFrygonGrid(size=120,sides=13):
    image = gimp_be.gimp.image_list()[0]
    if sides%2 == 1:
        for x in range(0,image.height,size):
            x_deep=0
            for y in range(0, image.width,size):
                if x_deep%2==1:
                    drawFrygon(sides,size,x,y-(size/2),0)
                else:
                    drawFrygon(sides,size,x,y,0)
                x_deep=x_deep+1
    else:
        for x in range(0,image.height, size):
            for y in range(0, image.width, size):
                drawFrygon(sides,size,x,y,0)
| J216/gimp_be | gimp_be/draw/draw.py | Python | mit | 26,770 |
#!/usr/bin/env python
# coding:utf-8
"""
Database operation module. This module is independent with web module.
"""
import time, logging
import db
class Field(object):
_count = 0
def __init__(self, **kw):
self.name = kw.get('name', None)
self.ddl = kw.get('ddl', '')
self._default = kw.get('default', None)
self.comment = kw.get('comment', '')
self.nullable = kw.get('nullable', False)
self.updatable = kw.get('updatable', True)
self.insertable = kw.get('insertable', True)
self.unique_key = kw.get('unique_key', False)
self.non_unique_key = kw.get('key', False)
self.primary_key = kw.get('primary_key', False)
self._order = Field._count
Field._count += 1
@property
def default(self):
d = self._default
return d() if callable(d) else d
def __str__(self):
s = ['<%s:%s,%s,default(%s),' % (self.__class__.__name__, self.name, self.ddl, self._default)]
self.nullable and s.append('N')
self.updatable and s.append('U')
self.insertable and s.append('I')
s.append('>')
return ''.join(s)
class StringField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'varchar(255)'
super(StringField, self).__init__(**kw)
class IntegerField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = 0
if not 'ddl' in kw:
kw['ddl'] = 'bigint'
super(IntegerField, self).__init__(**kw)
class FloatField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = 0.0
if not 'ddl' in kw:
kw['ddl'] = 'real'
super(FloatField, self).__init__(**kw)
class BooleanField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = False
if not 'ddl' in kw:
kw['ddl'] = 'bool'
super(BooleanField, self).__init__(**kw)
class TextField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'text'
super(TextField, self).__init__(**kw)
class BlobField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'blob'
super(BlobField, self).__init__(**kw)
class VersionField(Field):
def __init__(self, name=None):
super(VersionField, self).__init__(name=name, default=0, ddl='bigint')
class DateTimeField(Field):
def __init__(self, **kw):
if 'ddl' not in kw:
kw['ddl'] = 'datetime'
super(DateTimeField, self).__init__(**kw)
class DateField(Field):
def __init__(self, **kw):
if 'ddl' not in kw:
kw['ddl'] = 'date'
super(DateField, self).__init__(**kw)
class EnumField(Field):
def __init__(self, **kw):
if 'ddl' not in kw:
kw['ddl'] = 'enum'
super(EnumField, self).__init__(**kw)
_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])
def _gen_sql(table_name, mappings):
pk, unique_keys, keys = None, [], []
sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name]
for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)):
if not hasattr(f, 'ddl'):
raise StandardError('no ddl in field "%s".' % f)
ddl = f.ddl
nullable = f.nullable
has_comment = not (f.comment == '')
has_default = f._default is not None
left = nullable and ' `%s` %s' % (f.name, ddl) or ' `%s` %s not null' % (f.name, ddl)
mid = has_default and ' default \'%s\'' % f._default or None
right = has_comment and ' comment \'%s\',' % f.comment or ','
line = mid and '%s%s%s' % (left, mid, right) or '%s%s' % (left, right)
if f.primary_key:
pk = f.name
line = ' `%s` %s not null auto_increment,' % (f.name, ddl)
elif f.unique_key:
unique_keys.append(f.name)
elif f.non_unique_key:
keys.append(f.name)
sql.append(line)
for uk in unique_keys:
sql.append(' unique key(`%s`),' % uk)
for k in keys:
sql.append(' key(`%s`),' % k)
sql.append(' primary key(`%s`)' % pk)
sql.append(')ENGINE=InnoDB DEFAULT CHARSET=utf8;')
return '\n'.join(sql)
class ModelMetaclass(type):
"""
Metaclass for model objects.
"""
def __new__(cls, name, bases, attrs):
# skip base Model class:
if name == 'Model':
return type.__new__(cls, name, bases, attrs)
# store all subclasses info:
if not hasattr(cls, 'subclasses'):
cls.subclasses = {}
if not name in cls.subclasses:
cls.subclasses[name] = name
else:
logging.warning('Redefine class: %s', name)
logging.info('Scan ORMapping %s...', name)
mappings = dict()
primary_key = None
for k, v in attrs.iteritems():
if isinstance(v, Field):
if not v.name:
v.name = k
logging.debug('Found mapping: %s => %s' % (k, v))
# check duplicate primary key:
if v.primary_key:
if primary_key:
raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
if v.updatable:
# logging.warning('NOTE: change primary key to non-updatable.')
v.updatable = False
if v.nullable:
# logging.warning('NOTE: change primary key to non-nullable.')
v.nullable = False
primary_key = v
mappings[k] = v
# check exist of primary key:
if not primary_key:
raise TypeError('Primary key not defined in class: %s' % name)
for k in mappings.iterkeys():
attrs.pop(k)
if '__table__' not in attrs:
attrs['__table__'] = name.lower()
attrs['__mappings__'] = mappings
attrs['__primary_key__'] = primary_key
attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
for trigger in _triggers:
if trigger not in attrs:
attrs[trigger] = None
return type.__new__(cls, name, bases, attrs)
class Model(dict):
"""
Base class for ORM.
>>> class User(Model):
... id = IntegerField(primary_key=True)
... name = StringField()
... email = StringField(updatable=False)
... passwd = StringField(default=lambda: '******')
... last_modified = FloatField()
... def pre_insert(self):
... self.last_modified = time.time()
>>> u = User(id=10190, name='Michael', email='[email protected]')
>>> r = u.insert()
>>> u.email
'[email protected]'
>>> u.passwd
'******'
>>> u.last_modified > (time.time() - 2)
True
>>> f = User.get(10190)
>>> f.name
u'Michael'
>>> f.email
u'[email protected]'
>>> f.email = '[email protected]'
>>> r = f.update() # change email but email is non-updatable!
>>> len(User.find_all())
1
>>> g = User.get(10190)
>>> g.email
u'[email protected]'
>>> r = g.mark_deleted()
>>> len(db.select('select * from user where id=10190'))
0
>>> import json
    >>> print User().__sql__() # doctest: +ELLIPSIS
    -- generating SQL for user:
    create table `user` (
      `id` bigint not null auto_increment,
      `name` varchar(255) not null default '',
      `email` varchar(255) not null default '',
      `passwd` varchar(255) not null default '<function <lambda> at 0x...>',
      `last_modified` real not null default '0.0',
      primary key(`id`)
    )ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
__metaclass__ = ModelMetaclass
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
@classmethod
    def get(cls, key_value, key_name=None):
        """
        Get by primary key (default), or by another unique key named in key_name.
        """
        if key_name is None:
            key_name = cls.__primary_key__.name
        d = db.select_one('select * from %s where %s=?' % (cls.__table__, key_name), key_value)
        if not d:
            # TODO: change to logging?
            raise AttributeError("Can't find in [%s] where %s=[%s]" % (cls.__table__, key_name, key_value))
        return cls(**d)
@classmethod
def find_first(cls, where, *args):
"""
Find by where clause and return one result. If multiple results found,
only the first one returned. If no result found, return None.
"""
d = db.select_one('select * from %s %s' % (cls.__table__, where), *args)
return cls(**d) if d else None
@classmethod
def find_all(cls, *args):
"""
Find all and return list.
"""
L = db.select('select * from `%s`' % cls.__table__)
return [cls(**d) for d in L]
@classmethod
def find_by(cls, cols, where, *args):
"""
Find by where clause and return list.
"""
L = db.select('select %s from `%s` %s' % (cols, cls.__table__, where), *args)
if cols.find(',') == -1 and cols.strip() != '*':
return [d[0] for d in L]
return [cls(**d) for d in L]
@classmethod
def count_all(cls):
"""
Find by 'select count(pk) from table' and return integer.
"""
return db.select_int('select count(`%s`) from `%s`' % (cls.__primary_key__.name, cls.__table__))
@classmethod
def count_by(cls, where, *args):
"""
Find by 'select count(pk) from table where ... ' and return int.
"""
return db.select_int('select count(`%s`) from `%s` %s' % (cls.__primary_key__.name, cls.__table__, where), *args)
def update(self):
self.pre_update and self.pre_update()
L = []
args = []
for k, v in self.__mappings__.iteritems():
if v.updatable:
if hasattr(self, k):
arg = getattr(self, k)
else:
arg = v.default
setattr(self, k, arg)
L.append('`%s`=?' % k)
args.append(arg)
pk = self.__primary_key__.name
args.append(getattr(self, pk))
db.update('update `%s` set %s where %s=?' % (self.__table__, ','.join(L), pk), *args)
return self
def delete(self):
self.pre_delete and self.pre_delete()
pk = self.__primary_key__.name
args = (getattr(self, pk), )
db.update('delete from `%s` where `%s`=?' % (self.__table__, pk), *args)
return self
def insert(self):
self.pre_insert and self.pre_insert()
params = {}
for k, v in self.__mappings__.iteritems():
if v.insertable:
if not hasattr(self, k):
setattr(self, k, v.default)
params[v.name] = getattr(self, k)
try:
db.insert('%s' % self.__table__, **params)
except Exception as e:
logging.info(e.args)
print "MySQL Model.insert() error: args=", e.args
# TODO !!! generalize ORM return package
# return {'status': 'Failure', 'msg': e.args, 'data': self}
raise
return self
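    # Usage sketch beyond the doctest above (User as defined there; the 'where'
    # arguments are literal SQL fragments handed through to db.select):
    #
    #     total = User.count_all()
    #     michaels = User.find_by('*', 'where name=?', 'Michael')
    #     first = User.find_first('where email=?', 'orm@db.org')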
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
db.create_engine('www-data', 'www-data', 'test')
db.update('drop table if exists user')
db.update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')
import doctest
doctest.testmod()
| boisde/Greed_Island | business_logic/order_collector/transwarp/orm.py | Python | mit | 11,968 |
import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else None,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_author else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
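# A minimal sketch of a test module consuming these fixtures (hypothetical
# test functions, not part of this conftest):
#
#     def test_post_has_author(post):
#         assert post.author.id == post.author_id
#
#     def test_three_posts(posts):
#         assert len(posts) == 3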
| marshmallow-code/marshmallow-jsonapi | tests/conftest.py | Python | mit | 1,521 |
# Declaring a Function
def recurPowerNew(base, exp):
# Base case is when exp = 0
if exp <= 0:
return 1
# Recursive Call
elif exp % 2 == 0:
return recurPowerNew(base*base, exp/2)
return base * recurPowerNew(base, exp - 1)
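# Worked example: squaring halves even exponents, so the call chain is
# recurPowerNew(2, 10) -> recurPowerNew(4, 5) -> 4 * recurPowerNew(4, 4)
# -> 4 * recurPowerNew(16, 2) -> 4 * recurPowerNew(256, 1) -> ... = 1024
print recurPowerNew(2, 10)  # prints 1024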
| jabhij/MITx-6.00.1x-Python- | Week-3/L5/Prob3.py | Python | mit | 268 |
'''
Testing class for database API's course related functions.
Authors: Ari Kairala, Petteri Ponsimaa
Originally adopted from Ivan's exercise 1 test class.
'''
import unittest, hashlib
import re, base64, copy, json, server
from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, COURSE_PROFILE, API_VERSION
class RestCourseTestCase(BaseTestCase):
'''
RestCourseTestCase contains course related unit tests of the database API.
'''
# List of user credentials in exam_archive_data_dump.sql for testing purposes
super_user = "bigboss"
super_pw = hashlib.sha256("ultimatepw").hexdigest()
admin_user = "antti.admin"
admin_pw = hashlib.sha256("qwerty1234").hexdigest()
basic_user = "testuser"
basic_pw = hashlib.sha256("testuser").hexdigest()
wrong_pw = "wrong-pw"
test_course_template_1 = {"template": {
"data": [
{"name": "archiveId", "value": 1},
{"name": "courseCode", "value": "810136P"},
{"name": "name", "value": "Johdatus tietojenk\u00e4sittelytieteisiin"},
{"name": "description", "value": "Lorem ipsum"},
{"name": "inLanguage", "value": "fi"},
{"name": "creditPoints", "value": 4},
{"name": "teacherId", "value": 1}]
}
}
test_course_template_2 = {"template": {
"data": [
{"name": "archiveId", "value": 1},
{"name": "courseCode", "value": "810137P"},
{"name": "name", "value": "Introduction to Information Processing Sciences"},
{"name": "description", "value": "Aaa Bbbb"},
{"name": "inLanguage", "value": "en"},
{"name": "creditPoints", "value": 5},
{"name": "teacherId", "value": 2}]
}
}
course_resource_url = '/exam_archive/api/archives/1/courses/1/'
course_resource_not_allowed_url = '/exam_archive/api/archives/2/courses/1/'
courselist_resource_url = '/exam_archive/api/archives/1/courses/'
# Set a ready header for authorized admin user
header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}
# Define a list of the sample contents of the database, so we can later compare it to the test results
@classmethod
def setUpClass(cls):
print "Testing ", cls.__name__
def test_user_not_authorized(self):
'''
Check that user in not able to get course list without authenticating.
'''
print '(' + self.test_user_not_authorized.__name__ + ')', \
self.test_user_not_authorized.__doc__
# Test CourseList/GET
rv = self.app.get(self.courselist_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test CourseList/POST
rv = self.app.post(self.courselist_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/GET
rv = self.app.get(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/PUT
rv = self.app.put(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/DELETE
rv = self.app.put(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to Course/POST when not admin or super user
rv = self.app.post(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to delete course, when not admin or super user
rv = self.app.delete(self.course_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to get Course list as basic user from unallowed archive
rv = self.app.get(self.course_resource_not_allowed_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to get Course list as super user with wrong password
rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.wrong_pw)})
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
def test_user_authorized(self):
'''
Check that authenticated user is able to get course list.
'''
print '(' + self.test_user_authorized.__name__ + ')', \
self.test_user_authorized.__doc__
# Try to get Course list as basic user from the correct archive
rv = self.app.get(self.course_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
# User authorized as super user
rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.super_pw)})
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
def test_course_get(self):
'''
Check data consistency of Course/GET and CourseList/GET.
'''
print '(' + self.test_course_get.__name__ + ')', \
self.test_course_get.__doc__
# Test CourseList/GET
self._course_get(self.courselist_resource_url)
# Test single course Course/GET
self._course_get(self.course_resource_url)
def _course_get(self, resource_url):
'''
        Check data consistency of a course resource GET (single course or the course list).
'''
# Get all the courses from database
courses = db.browse_courses(1)
# Get all the courses from API
rv = self.app.get(resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
input = json.loads(rv.data)
assert input
# Go through the data
data = input['collection']
items = data['items']
self.assertEquals(data['href'], resource_url)
self.assertEquals(data['version'], API_VERSION)
for item in items:
obj = self._create_dict(item['data'])
course = db.get_course(obj['courseId'])
assert self._isIdentical(obj, course)
def test_course_post(self):
'''
Check that a new course can be created.
'''
print '(' + self.test_course_post.__name__ + ')', \
self.test_course_post.__doc__
resource_url = self.courselist_resource_url
new_course = self.test_course_template_1.copy()
# Test CourseList/POST
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
self.assertEquals(rv.status_code,201)
        # POST returns the URL of the newly created resource in the 'location' header. Get the identifier of
        # the just created item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*courses/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
        # Fetch the item from database into course_in_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
course_in_db = db.get_course(new_id)
course_posted = self._convert(new_course)
# Compare the data in database and the post template above.
self.assertDictContainsSubset(course_posted, course_in_db)
# Next, try to add the same course twice - there should be conflict
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
self.assertEquals(rv.status_code,409)
# Next check that by posting invalid JSON data we get status code 415
invalid_json = "INVALID " + json.dumps(new_course)
rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,415)
# Check that template structure is validated
invalid_json = json.dumps(new_course['template'])
rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,400)
# Check for the missing required field by removing the third row in array (course name)
invalid_template = copy.deepcopy(new_course)
invalid_template['template']['data'].pop(2)
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(invalid_template))
self.assertEquals(rv.status_code,400)
# Lastly, delete the item
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
def test_course_put(self):
'''
Check that an existing course can be modified.
'''
print '(' + self.test_course_put.__name__ + ')', \
self.test_course_put.__doc__
resource_url = self.courselist_resource_url
new_course = self.test_course_template_1
edited_course = self.test_course_template_2
# First create the course
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
self.assertEquals(rv.status_code,201)
location = rv.location
self.assertIsNotNone(location)
# Then try to edit the course
rv = self.app.put(location, headers=self.header_auth, data=json.dumps(edited_course))
self.assertEquals(rv.status_code,200)
location = rv.location
self.assertIsNotNone(location)
        # PUT returns the URL of the modified resource in the 'location' header. Get the identifier of
        # the modified item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*courses/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
        # Fetch the item from database into course_in_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
course_in_db = db.get_course(new_id)
course_posted = self._convert(edited_course)
# Compare the data in database and the post template above.
self.assertDictContainsSubset(course_posted, course_in_db)
# Next check that by posting invalid JSON data we get status code 415
invalid_json = "INVALID " + json.dumps(new_course)
rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,415)
# Check that template structure is validated
invalid_json = json.dumps(new_course['template'])
rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,400)
# Lastly, we delete the course
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
def test_course_delete(self):
'''
        Check that an existing course can be deleted.
'''
print '(' + self.test_course_delete.__name__ + ')', \
self.test_course_delete.__doc__
# First create the course
resource_url = self.courselist_resource_url
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(self.test_course_template_2))
self.assertEquals(rv.status_code,201)
location = rv.location
self.assertIsNotNone(location)
# Get the identifier of the just created item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*courses/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
# Then, we delete the course
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
# Try to fetch the deleted course from database - expect to fail
self.assertIsNone(db.get_course(new_id))
def test_for_method_not_allowed(self):
'''
        Check that unsupported methods return 405, method not allowed.
        '''
        print '(' + self.test_for_method_not_allowed.__name__ + ')', \
            self.test_for_method_not_allowed.__doc__
# CourseList/PUT should not exist
rv = self.app.put(self.courselist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
# CourseList/DELETE should not exist
rv = self.app.delete(self.courselist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
# Course/POST should not exist
rv = self.app.post(self.course_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
def _isIdentical(self, api_item, db_item):
'''
Check whether template data corresponds to data stored in the database.
'''
return api_item['courseId'] == db_item['course_id'] and \
api_item['name'] == db_item['course_name'] and \
api_item['archiveId'] == db_item['archive_id'] and \
api_item['description'] == db_item['description'] and \
api_item['inLanguage'] == db_item['language_id'] and \
api_item['creditPoints'] == db_item['credit_points'] and \
api_item['courseCode'] == db_item['course_code']
def _convert(self, template_data):
'''
Convert template data to a dictionary representing the format the data is saved in the database.
'''
trans_table = {"name":"course_name", "url":"url", "archiveId":"archive_id", "courseCode":"course_code",
"dateModified": "modified_date", "modifierId":"modifier_id", "courseId":"course_id",
"description":"description", "inLanguage":"language_id", "creditPoints":"credit_points",
"teacherId":"teacher_id", "teacherName":"teacher_name"}
data = self._create_dict(template_data['template']['data'])
db_item = {}
for key, val in data.items():
db_item[trans_table[key]] = val
return db_item
    def _create_dict(self, item):
        '''
        Create a dictionary from template data for easier handling.
        '''
        result = {}
        for f in item:
            result[f['name']] = f['value']
        return result
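    # Illustrative example (hypothetical values): template rows such as
    #     [{'name': 'courseId', 'value': 'C1'}, {'name': 'name', 'value': 'Intro'}]
    # become {'courseId': 'C1', 'name': 'Intro'} via _create_dict, which _convert
    # then maps to {'course_id': 'C1', 'course_name': 'Intro'}.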
if __name__ == '__main__':
print 'Start running tests'
unittest.main()
| petterip/exam-archive | test/rest_api_test_course.py | Python | mit | 16,344 |
#!/usr/bin/env python2.7
# Convert a SAM alignment file to FASTQ records.
# Usage (illustrative): sam_to_fastq.py alignments.sam > reads.fastq
import sys
for line in open(sys.argv[1]):
	cut=line.split('\t')
	# skip headers and short rows: SAM alignment lines have at least 11 columns
	if len(cut)<11: continue
	# FASTQ record: '@' + read name (col 1), sequence (col 10), '+', base qualities (col 11)
	print "@"+cut[0]
	print cut[9]
	print "+"
	print cut[10]
| ursky/metaWRAP | bin/metawrap-scripts/sam_to_fastq.py | Python | mit | 173 |
# -*- coding: utf-8 -*-
# Keyak v2 implementation by Jos Wetzels and Wouter Bokslag
# hereby denoted as "the implementer".
# Based on Keccak Python and Keyak v2 C++ implementations
# by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
# Joan Daemen, Michaรซl Peeters, Gilles Van Assche and Ronny Van Keer
#
# For more information, feedback or questions, please refer to:
# http://keyak.noekeon.org/
# http://keccak.noekeon.org/
# http://ketje.noekeon.org/
from StringIO import StringIO
class stringStream(StringIO):
# Peek (extract byte without advancing position, return None if no more stream is available)
def peek(self):
oldPos = self.tell()
b = self.read(1)
newPos = self.tell()
if((newPos == (oldPos+1)) and (b != '')):
r = ord(b)
else:
r = None
self.seek(oldPos, 0)
return r
# Pop a single byte (as integer representation)
def get(self):
return ord(self.read(1))
# Push a single byte (as integer representation)
def put(self, b):
self.write(chr(b))
return
# Erase buffered contents
def erase(self):
self.truncate(0)
self.seek(0, 0)
return
# Set buffered contents
def setvalue(self, s):
self.erase()
self.write(s)
return
def hasMore(I):
return (I.peek() != None)
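# Minimal usage sketch of stringStream (illustrative, not part of the Keyak code):
def _stringStream_demo():
    I = stringStream()
    I.put(0x41)               # buffer one byte (as its integer value)
    I.seek(0, 0)              # rewind so the byte can be read back
    assert I.peek() == 0x41   # peek() inspects without consuming
    assert hasMore(I)
    assert I.get() == 0x41    # get() consumes the byte
    assert not hasMore(I)     # stream is now exhausted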
def enc8(x):
if (x > 255):
raise Exception("The integer %d cannot be encoded on 8 bits." % x)
else:
return x
# Constant-time comparison from the Django source: https://github.com/django/django/blob/master/django/utils/crypto.py
# Is constant-time only if both strings are of equal length but given the use-case that is always the case.
def constant_time_compare(val1, val2):
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
    return result == 0
 | samvartaka/keyak-python | utils.py | Python | mit | 1,775 |
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/sankey/hoverlabel/_bordercolor.py | Python | mit | 482 |
import numpy as np
import warnings
from .._explainer import Explainer
from packaging import version
torch = None
class PyTorchDeep(Explainer):
def __init__(self, model, data):
# try and import pytorch
global torch
if torch is None:
import torch
if version.parse(torch.__version__) < version.parse("0.4"):
warnings.warn("Your PyTorch version is older than 0.4 and not supported.")
# check if we have multiple inputs
self.multi_input = False
if type(data) == list:
self.multi_input = True
if type(data) != list:
data = [data]
self.data = data
self.layer = None
self.input_handle = None
self.interim = False
self.interim_inputs_shape = None
self.expected_value = None # to keep the DeepExplainer base happy
if type(model) == tuple:
self.interim = True
model, layer = model
model = model.eval()
self.layer = layer
self.add_target_handle(self.layer)
# if we are taking an interim layer, the 'data' is going to be the input
# of the interim layer; we will capture this using a forward hook
with torch.no_grad():
_ = model(*data)
interim_inputs = self.layer.target_input
if type(interim_inputs) is tuple:
# this should always be true, but just to be safe
self.interim_inputs_shape = [i.shape for i in interim_inputs]
else:
self.interim_inputs_shape = [interim_inputs.shape]
self.target_handle.remove()
del self.layer.target_input
self.model = model.eval()
self.multi_output = False
self.num_outputs = 1
with torch.no_grad():
outputs = model(*data)
# also get the device everything is running on
self.device = outputs.device
if outputs.shape[1] > 1:
self.multi_output = True
self.num_outputs = outputs.shape[1]
self.expected_value = outputs.mean(0).cpu().numpy()
def add_target_handle(self, layer):
input_handle = layer.register_forward_hook(get_target_input)
self.target_handle = input_handle
def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model.
Recursively for non-container layers
"""
handles_list = []
model_children = list(model.children())
if model_children:
for child in model_children:
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else: # leaves
handles_list.append(model.register_forward_hook(forward_handle))
handles_list.append(model.register_backward_hook(backward_handle))
return handles_list
def remove_attributes(self, model):
"""
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
"""
for child in model.children():
if 'nn.modules.container' in str(type(child)):
self.remove_attributes(child)
else:
try:
del child.x
except AttributeError:
pass
try:
del child.y
except AttributeError:
pass
def gradient(self, idx, inputs):
self.model.zero_grad()
X = [x.requires_grad_() for x in inputs]
outputs = self.model(*X)
selected = [val for val in outputs[:, idx]]
grads = []
if self.interim:
interim_inputs = self.layer.target_input
for idx, input in enumerate(interim_inputs):
grad = torch.autograd.grad(selected, input,
retain_graph=True if idx + 1 < len(interim_inputs) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
del self.layer.target_input
return grads, [i.detach().cpu().numpy() for i in interim_inputs]
else:
for idx, x in enumerate(X):
grad = torch.autograd.grad(selected, x,
retain_graph=True if idx + 1 < len(X) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
return grads
def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=False):
# X ~ self.model_input
# X_data ~ self.data
# check if we have multiple inputs
if not self.multi_input:
assert type(X) != list, "Expected a single tensor model input!"
X = [X]
else:
assert type(X) == list, "Expected a list of model inputs!"
X = [x.detach().to(self.device) for x in X]
if ranked_outputs is not None and self.multi_output:
with torch.no_grad():
model_output_values = self.model(*X)
# rank and determine the model outputs that we will explain
if output_rank_order == "max":
_, model_output_ranks = torch.sort(model_output_values, descending=True)
elif output_rank_order == "min":
_, model_output_ranks = torch.sort(model_output_values, descending=False)
elif output_rank_order == "max_abs":
_, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)
else:
assert False, "output_rank_order must be max, min, or max_abs!"
model_output_ranks = model_output_ranks[:, :ranked_outputs]
else:
model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *
torch.arange(0, self.num_outputs).int())
# add the gradient handles
handles = self.add_handles(self.model, add_interim_values, deeplift_grad)
if self.interim:
self.add_target_handle(self.layer)
# compute the attributions
output_phis = []
for i in range(model_output_ranks.shape[1]):
phis = []
if self.interim:
for k in range(len(self.interim_inputs_shape)):
phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1: ]))
else:
for k in range(len(X)):
phis.append(np.zeros(X[k].shape))
for j in range(X[0].shape[0]):
# tile the inputs to line up with the background data samples
tiled_X = [X[l][j:j + 1].repeat(
(self.data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape) - 1)])) for l
in range(len(X))]
joint_x = [torch.cat((tiled_X[l], self.data[l]), dim=0) for l in range(len(X))]
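                # (illustrative note) joint_x stacks the tiled test sample on top of
                # the background batch, so one forward/backward pass yields gradients
                # for both halves; they are sliced apart below when assigning
                # attributions.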
# run attribution computation graph
feature_ind = model_output_ranks[j, i]
sample_phis = self.gradient(feature_ind, joint_x)
# assign the attributions to the right part of the output arrays
if self.interim:
sample_phis, output = sample_phis
x, data = [], []
for k in range(len(output)):
x_temp, data_temp = np.split(output[k], 2)
x.append(x_temp)
data.append(data_temp)
for l in range(len(self.interim_inputs_shape)):
phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (x[l] - data[l])).mean(0)
else:
for l in range(len(X)):
phis[l][j] = (torch.from_numpy(sample_phis[l][self.data[l].shape[0]:]).to(self.device) * (X[l][j: j + 1] - self.data[l])).cpu().detach().numpy().mean(0)
output_phis.append(phis[0] if not self.multi_input else phis)
# cleanup; remove all gradient handles
for handle in handles:
handle.remove()
self.remove_attributes(self.model)
if self.interim:
self.target_handle.remove()
if not self.multi_output:
return output_phis[0]
elif ranked_outputs is not None:
return output_phis, model_output_ranks
else:
return output_phis
# Module hooks
def deeplift_grad(module, grad_input, grad_output):
"""The backward hook which computes the deeplift
gradient for an nn.Module
"""
# first, get the module type
module_type = module.__class__.__name__
    # then, check that the module is supported
if module_type in op_handler:
if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
return op_handler[module_type](module, grad_input, grad_output)
else:
print('Warning: unrecognized nn.Module: {}'.format(module_type))
return grad_input
def add_interim_values(module, input, output):
"""The forward hook used to save interim tensors, detached
from the graph. Used to calculate the multipliers
"""
try:
del module.x
except AttributeError:
pass
try:
del module.y
except AttributeError:
pass
module_type = module.__class__.__name__
if module_type in op_handler:
func_name = op_handler[module_type].__name__
# First, check for cases where we don't need to save the x and y tensors
if func_name == 'passthrough':
pass
else:
# check only the 0th input varies
for i in range(len(input)):
if i != 0 and type(output) is tuple:
assert input[i] == output[i], "Only the 0th input may vary!"
# if a new method is added, it must be added here too. This ensures tensors
# are only saved if necessary
if func_name in ['maxpool', 'nonlinear_1d']:
# only save tensors if necessary
if type(input) is tuple:
setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
else:
setattr(module, 'x', torch.nn.Parameter(input.detach()))
if type(output) is tuple:
setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
else:
setattr(module, 'y', torch.nn.Parameter(output.detach()))
if module_type in failure_case_modules:
input[0].register_hook(deeplift_tensor_grad)
def get_target_input(module, input, output):
"""A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
"""
try:
del module.target_input
except AttributeError:
pass
setattr(module, 'target_input', input)
# From the documentation: "The current implementation will not have the presented behavior for
# complex Module that perform many operations. In some failure cases, grad_input and grad_output
# will only contain the gradients for a subset of the inputs and outputs."
# The tensor hook below handles such failure cases (currently, MaxPool1d). In such cases, the deeplift
# grad should still be computed, and then appended to the complex_model_gradients list. The tensor hook
# will then retrieve the proper gradient from this list.
failure_case_modules = ['MaxPool1d']
def deeplift_tensor_grad(grad):
return_grad = complex_module_gradients[-1]
del complex_module_gradients[-1]
return return_grad
complex_module_gradients = []
def passthrough(module, grad_input, grad_output):
"""No change made to gradients"""
return None
def maxpool(module, grad_input, grad_output):
pool_to_unpool = {
'MaxPool1d': torch.nn.functional.max_unpool1d,
'MaxPool2d': torch.nn.functional.max_unpool2d,
'MaxPool3d': torch.nn.functional.max_unpool3d
}
pool_to_function = {
'MaxPool1d': torch.nn.functional.max_pool1d,
'MaxPool2d': torch.nn.functional.max_pool2d,
'MaxPool3d': torch.nn.functional.max_pool3d
}
delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
dup0 = [2] + [1 for i in delta_in.shape[1:]]
# we also need to check if the output is a tuple
y, ref_output = torch.chunk(module.y, 2)
cross_max = torch.max(y, ref_output)
diffs = torch.cat([cross_max - ref_output, y - cross_max], 0)
# all of this just to unpool the outputs
with torch.no_grad():
_, indices = pool_to_function[module.__class__.__name__](
module.x, module.kernel_size, module.stride, module.padding,
module.dilation, module.ceil_mode, True)
xmax_pos, rmax_pos = torch.chunk(pool_to_unpool[module.__class__.__name__](
grad_output[0] * diffs, indices, module.kernel_size, module.stride,
module.padding, list(module.x.shape)), 2)
org_input_shape = grad_input[0].shape # for the maxpool 1d
grad_input = [None for _ in grad_input]
grad_input[0] = torch.where(torch.abs(delta_in) < 1e-7, torch.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in).repeat(dup0)
if module.__class__.__name__ == 'MaxPool1d':
complex_module_gradients.append(grad_input[0])
# the grad input that is returned doesn't matter, since it will immediately be
# be overridden by the grad in the complex_module_gradient
grad_input[0] = torch.ones(org_input_shape)
return tuple(grad_input)
def linear_1d(module, grad_input, grad_output):
"""No change made to gradients."""
return None
def nonlinear_1d(module, grad_input, grad_output):
delta_out = module.y[: int(module.y.shape[0] / 2)] - module.y[int(module.y.shape[0] / 2):]
delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
dup0 = [2] + [1 for i in delta_in.shape[1:]]
# handles numerical instabilities where delta_in is very small by
# just taking the gradient in those cases
grads = [None for _ in grad_input]
grads[0] = torch.where(torch.abs(delta_in.repeat(dup0)) < 1e-6, grad_input[0],
grad_output[0] * (delta_out / delta_in).repeat(dup0))
return tuple(grads)
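# Worked example of the rescale rule above (illustrative values): for a ReLU with
# input x = 2.0 and reference input x0 = -1.0,
#     delta_in  = 2.0 - (-1.0) = 3.0
#     delta_out = relu(2.0) - relu(-1.0) = 2.0
# so the hook backpropagates grad_output * (delta_out / delta_in) = grad_output * 2/3
# rather than the instantaneous gradient relu'(2.0) = 1.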
op_handler = {}
# passthrough ops, where we make no change to the gradient
op_handler['Dropout3d'] = passthrough
op_handler['Dropout2d'] = passthrough
op_handler['Dropout'] = passthrough
op_handler['AlphaDropout'] = passthrough
op_handler['Conv1d'] = linear_1d
op_handler['Conv2d'] = linear_1d
op_handler['Conv3d'] = linear_1d
op_handler['ConvTranspose1d'] = linear_1d
op_handler['ConvTranspose2d'] = linear_1d
op_handler['ConvTranspose3d'] = linear_1d
op_handler['Linear'] = linear_1d
op_handler['AvgPool1d'] = linear_1d
op_handler['AvgPool2d'] = linear_1d
op_handler['AvgPool3d'] = linear_1d
op_handler['AdaptiveAvgPool1d'] = linear_1d
op_handler['AdaptiveAvgPool2d'] = linear_1d
op_handler['AdaptiveAvgPool3d'] = linear_1d
op_handler['BatchNorm1d'] = linear_1d
op_handler['BatchNorm2d'] = linear_1d
op_handler['BatchNorm3d'] = linear_1d
op_handler['LeakyReLU'] = nonlinear_1d
op_handler['ReLU'] = nonlinear_1d
op_handler['ELU'] = nonlinear_1d
op_handler['Sigmoid'] = nonlinear_1d
op_handler["Tanh"] = nonlinear_1d
op_handler["Softplus"] = nonlinear_1d
op_handler['Softmax'] = nonlinear_1d
op_handler['MaxPool1d'] = maxpool
op_handler['MaxPool2d'] = maxpool
op_handler['MaxPool3d'] = maxpool
| slundberg/shap | shap/explainers/_deep/deep_pytorch.py | Python | mit | 16,170 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import curses
from . import docs
from .content import SubmissionContent, SubredditContent
from .page import Page, PageController, logged_in
from .objects import Navigator, Color, Command
from .exceptions import TemporaryFileError
class SubmissionController(PageController):
character_map = {}
class SubmissionPage(Page):
FOOTER = docs.FOOTER_SUBMISSION
def __init__(self, reddit, term, config, oauth, url=None, submission=None):
super(SubmissionPage, self).__init__(reddit, term, config, oauth)
self.controller = SubmissionController(self, keymap=config.keymap)
if url:
self.content = SubmissionContent.from_url(
reddit, url, term.loader,
max_comment_cols=config['max_comment_cols'])
else:
self.content = SubmissionContent(
submission, term.loader,
max_comment_cols=config['max_comment_cols'])
# Start at the submission post, which is indexed as -1
self.nav = Navigator(self.content.get, page_index=-1)
self.selected_subreddit = None
@SubmissionController.register(Command('SUBMISSION_TOGGLE_COMMENT'))
def toggle_comment(self):
"Toggle the selected comment tree between visible and hidden"
current_index = self.nav.absolute_index
self.content.toggle(current_index)
# This logic handles a display edge case after a comment toggle. We
# want to make sure that when we re-draw the page, the cursor stays at
# its current absolute position on the screen. In order to do this,
# apply a fixed offset if, while inverted, we either try to hide the
# bottom comment or toggle any of the middle comments.
if self.nav.inverted:
data = self.content.get(current_index)
if data['hidden'] or self.nav.cursor_index != 0:
window = self._subwindows[-1][0]
n_rows, _ = window.getmaxyx()
self.nav.flip(len(self._subwindows) - 1)
self.nav.top_item_height = n_rows
@SubmissionController.register(Command('SUBMISSION_EXIT'))
def exit_submission(self):
"Close the submission and return to the subreddit page"
self.active = False
@SubmissionController.register(Command('REFRESH'))
def refresh_content(self, order=None, name=None):
"Re-download comments and reset the page index"
order = order or self.content.order
url = name or self.content.name
with self.term.loader('Refreshing page'):
self.content = SubmissionContent.from_url(
self.reddit, url, self.term.loader, order=order,
max_comment_cols=self.config['max_comment_cols'])
if not self.term.loader.exception:
self.nav = Navigator(self.content.get, page_index=-1)
@SubmissionController.register(Command('PROMPT'))
def prompt_subreddit(self):
"Open a prompt to navigate to a different subreddit"
name = self.term.prompt_input('Enter page: /')
if name is not None:
with self.term.loader('Loading page'):
content = SubredditContent.from_name(
self.reddit, name, self.term.loader)
if not self.term.loader.exception:
self.selected_subreddit = content
self.active = False
@SubmissionController.register(Command('SUBMISSION_OPEN_IN_BROWSER'))
def open_link(self):
"Open the selected item with the webbrowser"
data = self.get_selected_item()
url = data.get('permalink')
if url:
self.term.open_browser(url)
else:
self.term.flash()
@SubmissionController.register(Command('SUBMISSION_OPEN_IN_PAGER'))
def open_pager(self):
"Open the selected item with the system's pager"
data = self.get_selected_item()
if data['type'] == 'Submission':
text = '\n\n'.join((data['permalink'], data['text']))
self.term.open_pager(text)
elif data['type'] == 'Comment':
text = '\n\n'.join((data['permalink'], data['body']))
self.term.open_pager(text)
else:
self.term.flash()
@SubmissionController.register(Command('SUBMISSION_POST'))
@logged_in
def add_comment(self):
"""
Submit a reply to the selected item.
Selected item:
Submission - add a top level comment
Comment - add a comment reply
"""
data = self.get_selected_item()
if data['type'] == 'Submission':
body = data['text']
reply = data['object'].add_comment
elif data['type'] == 'Comment':
body = data['body']
reply = data['object'].reply
else:
self.term.flash()
return
# Construct the text that will be displayed in the editor file.
# The post body will be commented out and added for reference
lines = ['# |' + line for line in body.split('\n')]
content = '\n'.join(lines)
comment_info = docs.COMMENT_FILE.format(
author=data['author'],
type=data['type'].lower(),
content=content)
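        # e.g. (illustrative) a two-line body "foo\nbar" appears in the editor
        # file as:
        #   # |foo
        #   # |bar
        # beneath the header provided by docs.COMMENT_FILE.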
with self.term.open_editor(comment_info) as comment:
if not comment:
self.term.show_notification('Canceled')
return
with self.term.loader('Posting', delay=0):
reply(comment)
# Give reddit time to process the submission
time.sleep(2.0)
if self.term.loader.exception is None:
self.refresh_content()
else:
raise TemporaryFileError()
@SubmissionController.register(Command('DELETE'))
@logged_in
def delete_comment(self):
"Delete the selected comment"
if self.get_selected_item()['type'] == 'Comment':
self.delete_item()
else:
self.term.flash()
@SubmissionController.register(Command('SUBMISSION_OPEN_IN_URLVIEWER'))
def comment_urlview(self):
data = self.get_selected_item()
comment = data.get('body') or data.get('text') or data.get('url_full')
if comment:
self.term.open_urlview(comment)
else:
self.term.flash()
def _draw_item(self, win, data, inverted):
if data['type'] == 'MoreComments':
return self._draw_more_comments(win, data)
elif data['type'] == 'HiddenComment':
return self._draw_more_comments(win, data)
elif data['type'] == 'Comment':
return self._draw_comment(win, data, inverted)
else:
return self._draw_submission(win, data)
def _draw_comment(self, win, data, inverted):
n_rows, n_cols = win.getmaxyx()
n_cols -= 1
# Handle the case where the window is not large enough to fit the text.
valid_rows = range(0, n_rows)
offset = 0 if not inverted else -(data['n_rows'] - n_rows)
# If there isn't enough space to fit the comment body on the screen,
# replace the last line with a notification.
split_body = data['split_body']
if data['n_rows'] > n_rows:
# Only when there is a single comment on the page and not inverted
if not inverted and len(self._subwindows) == 0:
cutoff = data['n_rows'] - n_rows + 1
split_body = split_body[:-cutoff]
split_body.append('(Not enough space to display)')
row = offset
if row in valid_rows:
attr = curses.A_BOLD
attr |= (Color.BLUE if not data['is_author'] else Color.GREEN)
self.term.add_line(win, '{author} '.format(**data), row, 1, attr)
if data['flair']:
attr = curses.A_BOLD | Color.YELLOW
self.term.add_line(win, '{flair} '.format(**data), attr=attr)
text, attr = self.term.get_arrow(data['likes'])
self.term.add_line(win, text, attr=attr)
self.term.add_line(win, ' {score} {created} '.format(**data))
if data['gold']:
text, attr = self.term.guilded
self.term.add_line(win, text, attr=attr)
if data['stickied']:
text, attr = '[stickied]', Color.GREEN
self.term.add_line(win, text, attr=attr)
if data['saved']:
text, attr = '[saved]', Color.GREEN
self.term.add_line(win, text, attr=attr)
for row, text in enumerate(split_body, start=offset+1):
if row in valid_rows:
self.term.add_line(win, text, row, 1)
# Unfortunately vline() doesn't support custom color so we have to
# build it one segment at a time.
attr = Color.get_level(data['level'])
x = 0
for y in range(n_rows):
self.term.addch(win, y, x, self.term.vline, attr)
return attr | self.term.vline
def _draw_more_comments(self, win, data):
n_rows, n_cols = win.getmaxyx()
n_cols -= 1
self.term.add_line(win, '{body}'.format(**data), 0, 1)
self.term.add_line(
win, ' [{count}]'.format(**data), attr=curses.A_BOLD)
attr = Color.get_level(data['level'])
self.term.addch(win, 0, 0, self.term.vline, attr)
return attr | self.term.vline
def _draw_submission(self, win, data):
n_rows, n_cols = win.getmaxyx()
n_cols -= 3 # one for each side of the border + one for offset
for row, text in enumerate(data['split_title'], start=1):
self.term.add_line(win, text, row, 1, curses.A_BOLD)
row = len(data['split_title']) + 1
attr = curses.A_BOLD | Color.GREEN
self.term.add_line(win, '{author}'.format(**data), row, 1, attr)
attr = curses.A_BOLD | Color.YELLOW
if data['flair']:
self.term.add_line(win, ' {flair}'.format(**data), attr=attr)
self.term.add_line(win, ' {created} {subreddit}'.format(**data))
row = len(data['split_title']) + 2
attr = curses.A_UNDERLINE | Color.BLUE
self.term.add_line(win, '{url}'.format(**data), row, 1, attr)
offset = len(data['split_title']) + 3
# Cut off text if there is not enough room to display the whole post
split_text = data['split_text']
if data['n_rows'] > n_rows:
cutoff = data['n_rows'] - n_rows + 1
split_text = split_text[:-cutoff]
split_text.append('(Not enough space to display)')
for row, text in enumerate(split_text, start=offset):
self.term.add_line(win, text, row, 1)
row = len(data['split_title']) + len(split_text) + 3
self.term.add_line(win, '{score} '.format(**data), row, 1)
text, attr = self.term.get_arrow(data['likes'])
self.term.add_line(win, text, attr=attr)
self.term.add_line(win, ' {comments} '.format(**data))
if data['gold']:
text, attr = self.term.guilded
self.term.add_line(win, text, attr=attr)
if data['nsfw']:
text, attr = 'NSFW', (curses.A_BOLD | Color.RED)
self.term.add_line(win, text, attr=attr)
if data['saved']:
text, attr = '[saved]', Color.GREEN
self.term.add_line(win, text, attr=attr)
win.border()
| shaggytwodope/rtv | rtv/submission_page.py | Python | mit | 11,574 |
#!/usr/bin/env python
import os
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test label reading from an MNI tag file
#
# The current directory must be writeable.
#
try:
fname = "mni-tagtest.tag"
channel = open(fname, "wb")
channel.close()
# create some random points in a sphere
#
sphere1 = vtk.vtkPointSource()
sphere1.SetNumberOfPoints(13)
xform = vtk.vtkTransform()
xform.RotateWXYZ(20, 1, 0, 0)
xformFilter = vtk.vtkTransformFilter()
xformFilter.SetTransform(xform)
xformFilter.SetInputConnection(sphere1.GetOutputPort())
labels = vtk.vtkStringArray()
labels.InsertNextValue("0")
labels.InsertNextValue("1")
labels.InsertNextValue("2")
labels.InsertNextValue("3")
labels.InsertNextValue("Halifax")
labels.InsertNextValue("Toronto")
labels.InsertNextValue("Vancouver")
labels.InsertNextValue("Larry")
labels.InsertNextValue("Bob")
labels.InsertNextValue("Jackie")
labels.InsertNextValue("10")
labels.InsertNextValue("11")
labels.InsertNextValue("12")
weights = vtk.vtkDoubleArray()
weights.InsertNextValue(1.0)
weights.InsertNextValue(1.1)
weights.InsertNextValue(1.2)
weights.InsertNextValue(1.3)
weights.InsertNextValue(1.4)
weights.InsertNextValue(1.5)
weights.InsertNextValue(1.6)
weights.InsertNextValue(1.7)
weights.InsertNextValue(1.8)
weights.InsertNextValue(1.9)
weights.InsertNextValue(0.9)
weights.InsertNextValue(0.8)
weights.InsertNextValue(0.7)
writer = vtk.vtkMNITagPointWriter()
writer.SetFileName(fname)
writer.SetInputConnection(sphere1.GetOutputPort())
writer.SetInputConnection(1, xformFilter.GetOutputPort())
writer.SetLabelText(labels)
writer.SetWeights(weights)
writer.SetComments("Volume 1: sphere points\nVolume 2: transformed points")
writer.Write()
reader = vtk.vtkMNITagPointReader()
reader.CanReadFile(fname)
reader.SetFileName(fname)
textProp = vtk.vtkTextProperty()
textProp.SetFontSize(12)
textProp.SetColor(1.0, 1.0, 0.5)
labelHier = vtk.vtkPointSetToLabelHierarchy()
labelHier.SetInputConnection(reader.GetOutputPort())
labelHier.SetTextProperty(textProp)
labelHier.SetLabelArrayName("LabelText")
labelHier.SetMaximumDepth(15)
labelHier.SetTargetLabelCount(12)
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(labelHier.GetOutputPort())
labelMapper.UseDepthBufferOff()
labelMapper.SetShapeToRect()
labelMapper.SetStyleToOutline()
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
glyphSource = vtk.vtkSphereSource()
glyphSource.SetRadius(0.01)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(glyphSource.GetOutputPort())
glyph.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(glyph.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create rendering stuff
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddViewProp(actor)
ren1.AddViewProp(labelActor)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.Render()
try:
os.remove(fname)
except OSError:
pass
# render the image
#
# iren.Start()
except IOError:
print "Unable to test the writer/reader."
| timkrentz/SunTracker | IMU/VTK-6.2.0/IO/MINC/Testing/Python/TestMNITagPoints.py | Python | mit | 3,826 |
import uuid
from uqbar.objects import new
from supriya.patterns.Pattern import Pattern
class EventPattern(Pattern):
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def _coerce_iterator_output(self, expr, state=None):
import supriya.patterns
if not isinstance(expr, supriya.patterns.Event):
expr = supriya.patterns.NoteEvent(**expr)
if expr.get("uuid") is None:
expr = new(expr, uuid=uuid.uuid4())
return expr
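    # e.g. (illustrative) a pattern that yields {"frequency": 440} is coerced
    # here into a NoteEvent(frequency=440) tagged with a fresh uuid4.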
### PUBLIC METHODS ###
def play(self, clock=None, server=None):
import supriya.patterns
import supriya.realtime
event_player = supriya.patterns.RealtimeEventPlayer(
self, clock=clock, server=server or supriya.realtime.Server.default()
)
event_player.start()
return event_player
def with_bus(self, calculation_rate="audio", channel_count=None, release_time=0.25):
import supriya.patterns
return supriya.patterns.Pbus(
self,
calculation_rate=calculation_rate,
channel_count=channel_count,
release_time=release_time,
)
def with_effect(self, synthdef, release_time=0.25, **settings):
import supriya.patterns
return supriya.patterns.Pfx(
self, synthdef=synthdef, release_time=release_time, **settings
)
def with_group(self, release_time=0.25):
import supriya.patterns
return supriya.patterns.Pgroup(self, release_time=release_time)
| Pulgama/supriya | supriya/patterns/EventPattern.py | Python | mit | 1,545 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigiq_regkey_license_assignment
short_description: Manage regkey license assignment on BIG-IPs from a BIG-IQ
description:
- Manages the assignment of regkey licenses on a BIG-IQ. Assignment means
the license is assigned to a BIG-IP, or it needs to be assigned to a BIG-IP.
Additionally, this module supports revoking the assignments from BIG-IP devices.
version_added: "1.0.0"
options:
pool:
description:
- The registration key pool to use.
type: str
required: True
key:
description:
- The registration key you want to assign from the pool.
type: str
required: True
device:
description:
- When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
can reach the remote device to register.
- When C(managed) is C(yes), specifies the managed device, or device UUID, that
you want to register.
- If C(managed) is C(yes), it is very important you do not have more than
one device with the same name. BIG-IQ internally recognizes devices by their ID,
and therefore, this module cannot guarantee the correct device will be
registered. The device returned is the device that is used.
type: str
required: True
managed:
description:
- Whether the specified device is a managed or un-managed device.
- When C(state) is C(present), this parameter is required.
type: bool
device_port:
description:
- Specifies the port of the remote device to connect to.
- If this parameter is not specified, the default is C(443).
type: int
default: 443
device_username:
description:
- The username used to connect to the remote device.
- This username should be one that has sufficient privileges on the remote device
to do licensing. Usually this is the C(Administrator) role.
- When C(managed) is C(no), this parameter is required.
type: str
device_password:
description:
- The password of the C(device_username).
- When C(managed) is C(no), this parameter is required.
type: str
state:
description:
- When C(present), ensures the device is assigned the specified license.
- When C(absent), ensures the license is revoked from the remote device and freed
on the BIG-IQ.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Register an unmanaged device
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 1.1.1.1
managed: no
device_username: admin
device_password: secret
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by name
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: bigi1.foo.com
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by UUID
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 7141a063-7cf8-423f-9829-9d40599fa3e0
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import time
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import bigiq_version
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'deviceReference': 'device_reference',
'deviceAddress': 'device_address',
'httpsPort': 'device_port'
}
api_attributes = [
'deviceReference', 'deviceAddress', 'httpsPort', 'managed'
]
returnables = [
'device_address', 'device_reference', 'device_username', 'device_password',
'device_port', 'managed'
]
updatables = [
'device_reference', 'device_address', 'device_username', 'device_password',
'device_port', 'managed'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def device_password(self):
if self._values['device_password'] is None:
return None
return self._values['device_password']
@property
def device_username(self):
if self._values['device_username'] is None:
return None
return self._values['device_username']
@property
def device_address(self):
if self.device_is_address:
return self._values['device']
@property
def device_port(self):
if self._values['device_port'] is None:
return None
return int(self._values['device_port'])
@property
def device_is_address(self):
if is_valid_ip(self.device):
return True
return False
@property
def device_is_id(self):
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
if re.match(pattern, self.device):
return True
return False
@property
def device_is_name(self):
if not self.device_is_address and not self.device_is_id:
return True
return False
@property
def device_reference(self):
if not self.managed:
return None
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "address+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "hostname+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "uuid+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/" \
"?$filter={2}&$top=1".format(self.client.provider['server'],
self.client.provider['server_port'], filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No device with the specified address was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
id = response['items'][0]['uuid']
result = dict(
link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
)
return result
@property
def pool_id(self):
filter = "(name%20eq%20'{0}')".format(self.pool)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses?$filter={2}&$top=1'.format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No pool with the specified name was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['items'][0]['id']
@property
def member_id(self):
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "deviceName+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "deviceMachineId+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/' \
'?$filter={4}'.format(self.client.provider['server'], self.client.provider['server_port'],
self.pool_id, self.key, filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = response['items'][0]['id']
return result
class Changes(Parameters):
pass
class UsableChanges(Changes):
@property
def device_port(self):
if self._values['managed']:
return None
return self._values['device_port']
@property
def device_username(self):
if self._values['managed']:
return None
return self._values['device_username']
@property
def device_password(self):
if self._values['managed']:
return None
return self._values['device_password']
@property
def device_reference(self):
if not self._values['managed']:
return None
return self._values['device_reference']
@property
def device_address(self):
if self._values['managed']:
return None
return self._values['device_address']
@property
def managed(self):
return None
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params, client=self.client)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
start = datetime.now().isoformat()
version = bigiq_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
return self.create()
def exists(self):
if self.want.member_id is None:
return False
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.pool_id,
self.want.key,
self.want.member_id
)
resp = self.client.api.get(uri)
if resp.status == 200:
return True
return False
def remove(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
# Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
#
# This should be something that BIG-IQ can do natively in 6.1-ish time.
time.sleep(60)
return True
def create(self):
self._set_changed_options()
if not self.want.managed:
if self.want.device_username is None:
raise F5ModuleError(
"You must specify a 'device_username' when working with unmanaged devices."
)
if self.want.device_password is None:
raise F5ModuleError(
"You must specify a 'device_password' when working with unmanaged devices."
)
if self.module.check_mode:
return True
self.create_on_device()
if not self.exists():
raise F5ModuleError(
"Failed to license the remote device."
)
self.wait_for_device_to_be_licensed()
# Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
#
# This should be something that BIG-IQ can do natively in 6.1-ish time.
time.sleep(60)
return True
def create_on_device(self):
params = self.changes.api_params()
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.pool_id,
self.want.key
)
if not self.want.managed:
params['username'] = self.want.device_username
params['password'] = self.want.device_password
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def wait_for_device_to_be_licensed(self):
count = 0
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.pool_id,
self.want.key,
self.want.member_id
)
while count < 3:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] == 'LICENSED':
count += 1
else:
count = 0
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.pool_id,
self.want.key,
self.want.member_id
)
params = {}
if not self.want.managed:
params.update(self.changes.api_params())
params['id'] = self.want.member_id
params['username'] = self.want.device_username
params['password'] = self.want.device_password
self.client.api.delete(uri, json=params)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
pool=dict(required=True),
key=dict(required=True, no_log=True),
device=dict(required=True),
managed=dict(type='bool'),
device_port=dict(type='int', default=443),
device_username=dict(no_log=True),
device_password=dict(no_log=True),
state=dict(default='present', choices=['absent', 'present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['key', 'managed']],
['managed', False, ['device', 'device_username', 'device_password']],
['managed', True, ['device']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_if=spec.required_if
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/plugins/modules/bigiq_regkey_license_assignment.py | Python | mit | 19,962 |
'''
salt.utils
~~~~~~~~~~
'''
class lazy_property(object):
'''
meant to be used for lazy evaluation of an object attribute.
property should represent non-mutable data, as it replaces itself.
http://stackoverflow.com/a/6849299/564003
'''
def __init__(self, fget):
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return None
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
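# Minimal usage sketch (illustrative, not part of salt):
class _Cached(object):
    @lazy_property
    def answer(self):
        # runs only on the first access; the computed value then replaces
        # the descriptor as a plain instance attribute
        return 6 * 7
# _Cached().answer -> 42; later reads bypass __get__ entirely.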
| johnnoone/salt-targeting | src/salt/utils/__init__.py | Python | mit | 537 |
import re
from setuptools import setup
def find_version(filename):
_version_re = re.compile(r"__version__ = '(.*)'")
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
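# e.g. (illustrative) a line "__version__ = '0.0.3'" yields '0.0.3'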
__version__ = find_version('librdflib/__init__.py')
with open('README.md', 'rt') as f:
long_description = f.read()
tests_require = ['pytest']
setup(
name='librdflib',
version=__version__,
description='librdf parser for rdflib',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/tgbugs/pyontutils/tree/master/librdflib',
author='Tom Gillespie',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
keywords='rdflib librdf rdf parser parsing ttl rdfxml',
packages=['librdflib'],
python_requires='>=3',
tests_require=tests_require,
install_requires=[
'rdflib', # really 5.0.0 if my changes go in but dev < 5
],
extras_require={'dev': ['pytest-cov', 'wheel'],
'test': tests_require,
},
entry_points={
'rdf.plugins.parser': [
'librdfxml = librdflib:libRdfxmlParser',
'libttl = librdflib:libTurtleParser',
],
},
)
| tgbugs/pyontutils | librdflib/setup.py | Python | mit | 1,448 |
"""This module contains examples of the op() function
where:
op(f,x) returns a stream where x is a stream, and f
is an operator on lists, i.e., f is a function from
a list to a list. These lists are of lists of arbitrary
objects other than streams and agents.
Function f must be stateless, i.e., for any lists u, v:
f(u.extend(v)) = f(u).extend(f(v))
(Stateful functions are given in OpStateful.py with
examples in ExamplesOpWithState.py.)
Let f be a stateless operator on lists and let x be a stream.
If at some point, the value of stream x is a list u then at
that point, the value of stream op(f,x) is the list f(u).
If at a later point, the value of stream x is the list:
u.extend(v) then, at that point the value of stream op(f,x)
is f(u).extend(f(v)).
As a specific example, consider the following f():
def f(lst): return [w * w for w in lst]
If at some point in time, the value of x is [3, 7],
then at that point the value of op(f,x) is f([3, 7])
or [9, 49]. If at a later point, the value of x is
[3, 7, 0, 11, 5] then the value of op(f,x) at that point
is f([3, 7, 0, 11, 5]) or [9, 49, 0, 121, 25].
"""
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from Agent import *
from ListOperators import *
from PrintFunctions import print_streams_recent
def example_1():
print "example_1"
print "op(f, x): f is a function from a list to a list"
print "x is a stream \n"
# FUNCTIONS FROM LIST TO LIST
# This example uses the following list operators:
# functions from a list to a list.
# f, g, h, r
# Example A: function using list comprehension
def f(lst): return [w*w for w in lst]
# Example B: function using filter
threshold = 6
def predicate(w):
return w > threshold
def g(lst):
return filter(predicate, lst)
# Example C: function using map
# Raise each element of the list to the n-th power.
n = 3
def power(w):
return w**n
def h(lst):
return map(power, lst)
# Example D: function using another list comprehension
# Discard any element of x that is not a
# multiple of a parameter n, and divide the
# elements that are multiples of n by n.
n = 3
def r(lst):
result = []
for w in lst:
if w%n == 0: result.append(w/n)
return result
# EXAMPLES OF OPERATIONS ON STREAMS
# The input stream for these examples
x = Stream('x')
print 'x is the input stream.'
print 'a is a stream consisting of the squares of the input'
print 'b is the stream consisting of values that exceed 6'
print 'c is the stream consisting of the third powers of the input'
print 'd is the stream consisting of values that are multiples of 3 divided by 3'
print 'newa is the same as a. It is defined in a more succinct fashion.'
print 'newb has squares that exceed 6.'
print ''
# The output streams a, b, c, d obtained by
# applying the list operators f, g, h, r to
# stream x.
a = op(f, x)
b = op(g, x)
c = op(h, x)
d = op(r, x)
# You can also define a function only on streams.
# You can do this using functools in Python or
# by simple encapsulation as shown below.
def F(x): return op(f,x)
def G(x): return op(g,x)
newa = F(x)
newb = G(F(x))
# The advantage is that F is a function only
# of streams. So, function composition looks cleaner
# as in G(F(x))
# Name the output streams to label the output
# so that reading the output is easier.
a.set_name('a')
newa.set_name('newa')
b.set_name('b')
newb.set_name('newb')
c.set_name('c')
d.set_name('d')
# At this point x is the empty stream:
# its value is []
x.extend([3, 7])
# Now the value of x is [3, 7]
print "FIRST STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
print ""
x.extend([0, 11, 15])
# Now the value of x is [3, 7, 0, 11, 15]
print "SECOND STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
def main():
example_1()
if __name__ == '__main__':
main()
| zatricion/Streams | ExamplesElementaryOperations/ExamplesOpNoState.py | Python | mit | 4,241 |
## Close
### What is the value of the first triangle number to have over five hundred divisors?
# sum(range(n)) is the (n-1)th triangle number; the one-liner below only prints
# the largest divisor count found among the first 999 triangle numbers, not the
# first triangle number whose divisor count exceeds 500.
print max([len(m) for m in map(lambda k: [n for n in range(1,(k+1)) if k%n == 0], [sum(range(n)) for n in range(1,1000)])])
 | jacksarick/My-Code | Python/python challenges/euler/012_divisable_tri_nums.py | Python | mit | 219 |
from errors import *
from manager import SchemaManager
| Livefyre/pseudonym | pseudonym/__init__.py | Python | mit | 55 |
import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
        The evaluation function value for the set of weights (vector) r
        at the mth game and kth board state
        """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal diffence value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
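    # In TD(lambda) terms (illustrative summary of the method above): for game m
    # and state k this returns
    #     sum_{s=k}^{Nm-1} lambda**(s-k) * d_s,
    # where d_s = r_{s+1} + f(x_{s+1}; rt) - f(x_s; rt) is the one-step temporal
    # difference evaluated with the current weight vector rt.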
    def optimized_func(self, r):
        result = 0
        M = len(self.data)
        pool = Pool(processes=4)
        for m in range(M):
            Nm = self.data[m].shape[0] - 1
            k_args = range(Nm + 1)
            self_args = [self] * len(k_args)
            m_args = [m] * len(k_args)
            r_args = [r] * len(k_args)
            result += sum(pool.map(worker_func,
                                   zip(self_args, m_args, k_args, r_args)))
        # Release the worker processes; previously a new pool leaked per call.
        pool.close()
        pool.join()
        return result
    def optimized_func_i_der(self, r, i):
        """
        The derivative of the optimized function with respect to the
        ith component of the vector r (serial variant of the module-level
        helper above)
        """
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
    def optimized_func_der(self, r):
        p = Pool(processes=4)
        self_args = [self] * len(r)
        i_args = range(len(r))
        r_args = [r] * len(r)
        result = np.array(p.map(optimized_func_i_der,
                                zip(self_args, r_args, i_args)))
        # Clean up the pool instead of leaking it on every gradient call.
        p.close()
        p.join()
        return result
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
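if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module). Shapes are
    # assumptions read off set_data(): each game is an (N x 23) array whose
    # last column holds per-transition rewards and whose first 22 columns
    # are the features.
    agent = Agent()
    agent.set_learning_factor(0.5)
    agent.set_iter(3)
    agent.set_rt(np.zeros(Agent.num_features))
    games = [np.random.rand(8, Agent.num_features + 1) for _ in range(2)]
    agent.set_data(games)
    print(agent.compute_next_rt())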
| ndt93/tetris | scripts/agent3.py | Python | mit | 5,234 |
# -*- coding: utf-8 -*-
""""
ProjectName: pydemi
Repo: https://github.com/chrisenytc/pydemi
Copyright (c) 2014 Christopher EnyTC
Licensed under the MIT license.
"""
# Dependencies
import uuid
from api import app
from hashlib import sha1
from flask import request
from flask import jsonify as JSON
from api.models.user import User
from cors import cors
@app.route('/signup', methods=['POST'])
@cors(origin='*', methods=['POST'])
def signup():
# Create new user
new_user = User()
new_user.name = request.form['name']
new_user.email = request.form['email']
new_user.password = sha1(request.form['password']).hexdigest()
new_user.token = str(uuid.uuid4())
new_user.save()
return JSON(message='User created successfully')
@app.route('/signin', methods=['POST'])
@cors(origin='*', methods=['POST'])
def signin():
    # Return the user data
user_info = User.objects(email=request.form['email'], password=sha1(
request.form['password']).hexdigest())
if user_info.count():
return JSON(token=user_info.get().token, roles=user_info.get().roles)
else:
return JSON(message='User not found')
| chrisenytc/pydemi | api/controllers/users.py | Python | mit | 1,151 |
team_mapping = {
"SY": "Sydney",
"WB": "Western Bulldogs",
"WC": "West Coast",
"HW": "Hawthorn",
"GE": "Geelong",
"FR": "Fremantle",
"RI": "Richmond",
"CW": "Collingwood",
"CA": "Carlton",
"GW": "Greater Western Sydney",
"AD": "Adelaide",
"GC": "Gold Coast",
"ES": "Essendon",
"ME": "Melbourne",
"NM": "North Melbourne",
"PA": "Port Adelaide",
"BL": "Brisbane Lions",
"SK": "St Kilda"
}
def get_team_name(code):
return team_mapping[code]
def get_team_code(full_name):
    for code, name in team_mapping.items():
        if name == full_name:
            return code
    # Fall back to the input when no code matches
    return full_name
def get_match_description(response):
match_container = response.xpath("//td[@colspan = '5' and @align = 'center']")[0]
match_details = match_container.xpath(".//text()").extract()
return {
"round": match_details[1],
"venue": match_details[3],
"date": match_details[6],
"attendance": match_details[8],
"homeTeam": response.xpath("(//a[contains(@href, 'teams/')])[1]/text()").extract_first(),
"awayTeam": response.xpath("(//a[contains(@href, 'teams/')])[2]/text()").extract_first(),
"homeScore": int(response.xpath("//table[1]/tr[2]/td[5]/b/text()").extract_first()),
"awayScore": int(response.xpath("//table[1]/tr[3]/td[5]/b/text()").extract_first())
}
def get_match_urls(response):
for match in response.xpath("//a[contains(@href, 'stats/games/')]/@href").extract():
yield response.urljoin(match) | bairdj/beveridge | src/scrapy/afltables/afltables/common.py | Python | mit | 1,563 |
from keras.applications import imagenet_utils
from keras.applications import mobilenet
def dummyPreprocessInput(image):
    # Center pixel values from [0, 255] to [-127.5, 127.5]
    image -= 127.5
    return image
def getPreprocessFunction(preprocessType):
if preprocessType == "dummy":
return dummyPreprocessInput
elif preprocessType == "mobilenet":
return mobilenet.preprocess_input
elif preprocessType == "imagenet":
return imagenet_utils.preprocess_input
else:
raise Exception(preprocessType + " not supported")
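if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module), assuming a
    # float32 batch in [0, 255] channels-last layout.
    import numpy as np
    batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
    preprocess = getPreprocessFunction("dummy")
    print(preprocess(batch).mean())  # -> -127.5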
| SlipknotTN/Dogs-Vs-Cats-Playground | deep_learning/keras/lib/preprocess/preprocess.py | Python | mit | 511 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
from layers_basic import LW_Layer, default_data_format
from layers_convolutional import conv_output_length
###############################################
class _LW_Pooling1D(LW_Layer):
input_dim = 3
def __init__(self, pool_size=2, strides=None, padding='valid'):
if strides is None:
strides = pool_size
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
self.pool_length = pool_size
self.stride = strides
self.border_mode = padding
def get_output_shape_for(self, input_shape):
length = conv_output_length(input_shape[1], self.pool_length, self.border_mode, self.stride)
return (input_shape[0], length, input_shape[2])
class LW_MaxPooling1D(_LW_Pooling1D):
def __init__(self, pool_size=2, strides=None, padding='valid'):
super(LW_MaxPooling1D, self).__init__(pool_size, strides, padding)
class LW_AveragePooling1D(_LW_Pooling1D):
def __init__(self, pool_size=2, strides=None, padding='valid'):
super(LW_AveragePooling1D, self).__init__(pool_size, strides, padding)
###############################################
class _LW_Pooling2D(LW_Layer):
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
if data_format == 'default':
data_format = default_data_format
assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.pool_size = tuple(pool_size)
if strides is None:
strides = self.pool_size
self.strides = tuple(strides)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
self.border_mode = padding
self.dim_ordering = data_format
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.dim_ordering == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
else:
raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
rows = conv_output_length(rows, self.pool_size[0], self.border_mode, self.strides[0])
cols = conv_output_length(cols, self.pool_size[1], self.border_mode, self.strides[1])
if self.dim_ordering == 'channels_first':
return (input_shape[0], input_shape[1], rows, cols)
elif self.dim_ordering == 'channels_last':
return (input_shape[0], rows, cols, input_shape[3])
else:
raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
class LW_MaxPooling2D(_LW_Pooling2D):
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
super(LW_MaxPooling2D, self).__init__(pool_size, strides, padding, data_format)
class LW_AveragePooling2D(_LW_Pooling2D):
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
super(LW_AveragePooling2D, self).__init__(pool_size, strides, padding, data_format)
###############################################
class _LW_Pooling3D(LW_Layer):
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = default_data_format
assert dim_ordering in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.pool_size = tuple(pool_size)
if strides is None:
strides = self.pool_size
self.strides = tuple(strides)
assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
self.border_mode = border_mode
self.dim_ordering = dim_ordering
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'channels_first':
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
elif self.dim_ordering == 'channels_last':
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
else:
raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
len_dim1 = conv_output_length(len_dim1, self.pool_size[0], self.border_mode, self.strides[0])
len_dim2 = conv_output_length(len_dim2, self.pool_size[1], self.border_mode, self.strides[1])
len_dim3 = conv_output_length(len_dim3, self.pool_size[2], self.border_mode, self.strides[2])
if self.dim_ordering == 'channels_first':
return (input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3)
elif self.dim_ordering == 'channels_last':
return (input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4])
else:
raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
class LW_MaxPooling3D(_LW_Pooling3D):
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
super(LW_MaxPooling3D, self).__init__(pool_size, strides, border_mode, dim_ordering)
class LW_AveragePooling3D(_LW_Pooling3D):
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
super(LW_AveragePooling3D, self).__init__(pool_size, strides, border_mode, dim_ordering)
###############################################
class _LW_GlobalPooling1D(LW_Layer):
def __init__(self):
pass
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[2])
class LW_GlobalAveragePooling1D(_LW_GlobalPooling1D):
pass
class LW_GlobalMaxPooling1D(_LW_GlobalPooling1D):
pass
###############################################
class _LW_GlobalPooling2D(LW_Layer):
def __init__(self, data_format='default'):
if data_format == 'default':
data_format = default_data_format
self.dim_ordering = data_format
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'channels_last':
return (input_shape[0], input_shape[3])
else:
return (input_shape[0], input_shape[1])
class LW_GlobalAveragePooling2D(_LW_GlobalPooling2D):
pass
class LW_GlobalMaxPooling2D(_LW_GlobalPooling2D):
pass
###############################################
class _LW_GlobalPooling3D(LW_Layer):
def __init__(self, data_format='default'):
if data_format == 'default':
data_format = default_data_format
self.dim_ordering = data_format
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'channels_last':
return (input_shape[0], input_shape[4])
else:
return (input_shape[0], input_shape[1])
class LW_GlobalAveragePooling3D(_LW_GlobalPooling3D):
pass
class LW_GlobalMaxPooling3D(_LW_GlobalPooling3D):
pass
###############################################
if __name__ == '__main__':
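    # Hedged demo (added for illustration): shape inference for a 2x2
    # max-pool with stride 2 on a channels_last input.
    pool = LW_MaxPooling2D(pool_size=(2, 2), data_format='channels_last')
    print(pool.get_output_shape_for((None, 32, 32, 16)))  # -> (None, 16, 16, 16)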
pass | SummaLabs/DLS | app/backend/core/models-keras-2x-api/lightweight_layers/layers_pooling.py | Python | mit | 7,111 |
import sys
tagging_filepath = sys.argv[1]
following_filepath = sys.argv[2]
delim = '\t'
if len(sys.argv) > 3:
delim = sys.argv[3]
# graph[src][dst] encodes the relation for each tagging pair:
# 0 = tagged only, +1 if src follows dst, +2 if dst follows src
graph = {}
for line in open(tagging_filepath):
    entry = line.rstrip().split(delim)
    src = entry[0]
    dst = entry[1]
    if src not in graph: graph[src] = {}
    graph[src][dst] = 0
for line in open(following_filepath):
    entry = line.rstrip().split(delim)
    src = entry[0]
    dst = entry[1]
    if src in graph and dst in graph[src]:
        graph[src][dst] += 1
    if dst in graph and src in graph[dst]:
        graph[dst][src] += 2
w_dir = 0   # tagging pairs where src follows dst (direction-sensitive)
wo_dir = 0  # tagging pairs with a follow edge in either direction
count = 0.0
for src in graph:
for dst in graph[src]:
val = graph[src][dst]
count += 1
if val in [1,3]:
w_dir += 1
if val in [1,2,3]:
wo_dir += 1
print "%s\t%s" % (w_dir/count, wo_dir/count)
| yamaguchiyuto/icwsm15 | tag_follow_disagreement.py | Python | mit | 857 |
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Python client library for the Facebook Platform.
This client library is designed to support the Graph API and the
official Facebook JavaScript SDK, which is the canonical way to
implement Facebook authentication. Read more about the Graph API at
http://developers.facebook.com/docs/api. You can download the Facebook
JavaScript SDK at http://github.com/facebook/connect-js/.
If your application is using Google AppEngine's webapp framework, your
usage of this module might look like this:
user = facebook.get_user_from_cookie(self.request.cookies, key, secret)
if user:
graph = facebook.GraphAPI(user["access_token"])
profile = graph.get_object("me")
friends = graph.get_connections("me", "friends")
"""
import cgi
import time
import urllib
import urllib2
import httplib
import hashlib
import hmac
import base64
import logging
import socket
# Find a JSON parser
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import json
_parse_json = json.loads
# Find a query string parser
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
class GraphAPI(object):
"""A client for the Facebook Graph API.
See http://developers.facebook.com/docs/api for complete
documentation for the API.
The Graph API is made up of the objects in Facebook (e.g., people,
pages, events, photos) and the connections between them (e.g.,
friends, photo tags, and event RSVPs). This client provides access
to those primitive types in a generic way. For example, given an
OAuth access token, this will fetch the profile of the active user
and the list of the user's friends:
graph = facebook.GraphAPI(access_token)
user = graph.get_object("me")
friends = graph.get_connections(user["id"], "friends")
You can see a list of all of the objects and connections supported
by the API at http://developers.facebook.com/docs/reference/api/.
You can obtain an access token via OAuth or by using the Facebook
JavaScript SDK. See
http://developers.facebook.com/docs/authentication/ for details.
If you are using the JavaScript SDK, you can use the
get_user_from_cookie() method below to get the OAuth access token
for the active user from the cookie saved by the SDK.
"""
def __init__(self, access_token=None, timeout=None):
self.access_token = access_token
self.timeout = timeout
def get_object(self, id, **args):
"""Fetchs the given object from the graph."""
return self.request(id, args)
def get_objects(self, ids, **args):
"""Fetchs all of the given object from the graph.
We return a map from ID to object. If any of the IDs are
invalid, we raise an exception.
"""
args["ids"] = ",".join(ids)
return self.request("", args)
def get_connections(self, id, connection_name, **args):
"""Fetchs the connections for given object."""
return self.request(id + "/" + connection_name, args)
def put_object(self, parent_object, connection_name, **data):
"""Writes the given object to the graph, connected to the given parent.
For example,
graph.put_object("me", "feed", message="Hello, world")
writes "Hello, world" to the active user's wall. Likewise, this
        will comment on the first post of the active user's feed:
feed = graph.get_connections("me", "feed")
post = feed["data"][0]
graph.put_object(post["id"], "comments", message="First!")
See http://developers.facebook.com/docs/api#publishing for all
of the supported writeable objects.
Certain write operations require extended permissions. For
example, publishing to a user's feed requires the
"publish_actions" permission. See
http://developers.facebook.com/docs/publishing/ for details
about publishing permissions.
"""
assert self.access_token, "Write operations require an access token"
return self.request(parent_object + "/" + connection_name,
post_args=data)
def put_wall_post(self, message, attachment={}, profile_id="me"):
"""Writes a wall post to the given profile's wall.
We default to writing to the authenticated user's wall if no
profile_id is specified.
attachment adds a structured attachment to the status message
being posted to the Wall. It should be a dictionary of the form:
{"name": "Link name"
"link": "http://www.example.com/",
"caption": "{*actor*} posted a new review",
"description": "This is a longer description of the attachment",
"picture": "http://www.example.com/thumbnail.jpg"}
"""
return self.put_object(profile_id, "feed", message=message,
**attachment)
def put_comment(self, object_id, message):
"""Writes the given comment on the given post."""
return self.put_object(object_id, "comments", message=message)
def put_like(self, object_id):
"""Likes the given post."""
return self.put_object(object_id, "likes")
def delete_object(self, id):
"""Deletes the object with the given ID from the graph."""
self.request(id, post_args={"method": "delete"})
def delete_request(self, user_id, request_id):
"""Deletes the Request with the given ID for the given user."""
conn = httplib.HTTPSConnection('graph.facebook.com')
url = '/%s_%s?%s' % (
request_id,
user_id,
urllib.urlencode({'access_token': self.access_token}),
)
conn.request('DELETE', url)
response = conn.getresponse()
data = response.read()
response = _parse_json(data)
        # Raise an error if we got one, but not if Facebook just
        # gave us a boolean value
if (response and isinstance(response, dict) and response.get("error")):
raise GraphAPIError(response)
conn.close()
def put_photo(self, image, message=None, album_id=None, **kwargs):
"""Uploads an image using multipart/form-data.
image=File like object for the image
message=Caption for your image
album_id=None posts to /me/photos which uses or creates and uses
an album for your application.
"""
object_id = album_id or "me"
#it would have been nice to reuse self.request;
#but multipart is messy in urllib
post_args = {
'access_token': self.access_token,
'source': image,
'message': message,
}
post_args.update(kwargs)
content_type, body = self._encode_multipart_form(post_args)
req = urllib2.Request(("https://graph.facebook.com/%s/photos" %
object_id),
data=body)
req.add_header('Content-Type', content_type)
try:
data = urllib2.urlopen(req).read()
#For Python 3 use this:
#except urllib2.HTTPError as e:
except urllib2.HTTPError, e:
data = e.read() # Facebook sends OAuth errors as 400, and urllib2
# throws an exception, we want a GraphAPIError
try:
response = _parse_json(data)
            # Raise an error if we got one, but not if Facebook just
            # gave us a boolean value
if (response and isinstance(response, dict) and
response.get("error")):
raise GraphAPIError(response)
except ValueError:
response = data
return response
# based on: http://code.activestate.com/recipes/146306/
def _encode_multipart_form(self, fields):
"""Encode files as 'multipart/form-data'.
Fields are a dict of form name-> value. For files, value should
be a file object. Other file-like objects might work and a fake
name will be chosen.
Returns (content_type, body) ready for httplib.HTTP instance.
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields.items():
logging.debug("Encoding %s, (%s)%s" % (key, type(value), value))
if not value:
continue
L.append('--' + BOUNDARY)
if hasattr(value, 'read') and callable(value.read):
filename = getattr(value, 'name', '%s.jpg' % key)
L.append(('Content-Disposition: form-data;'
'name="%s";'
'filename="%s"') % (key, filename))
L.append('Content-Type: image/jpeg')
value = value.read()
logging.debug(type(value))
else:
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
if isinstance(value, unicode):
logging.debug("Convert to ascii")
value = value.encode('ascii')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def request(self, path, args=None, post_args=None):
"""Fetches the given path in the Graph API.
We translate args to a valid query string. If post_args is
given, we send a POST request to the given path with the given
arguments.
"""
args = args or {}
if self.access_token:
if post_args is not None:
post_args["access_token"] = self.access_token
else:
args["access_token"] = self.access_token
post_data = None if post_args is None else urllib.urlencode(post_args)
try:
file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" +
urllib.urlencode(args),
post_data, timeout=self.timeout)
except urllib2.HTTPError, e:
response = _parse_json(e.read())
raise GraphAPIError(response)
except TypeError:
# Timeout support for Python <2.6
if self.timeout:
socket.setdefaulttimeout(self.timeout)
file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" +
urllib.urlencode(args), post_data)
try:
fileInfo = file.info()
if fileInfo.maintype == 'text':
response = _parse_json(file.read())
elif fileInfo.maintype == 'image':
mimetype = fileInfo['content-type']
response = {
"data": file.read(),
"mime-type": mimetype,
"url": file.url,
}
else:
raise GraphAPIError('Maintype was not text or image')
finally:
file.close()
if response and isinstance(response, dict) and response.get("error"):
raise GraphAPIError(response["error"]["type"],
response["error"]["message"])
return response
def fql(self, query, args=None, post_args=None):
"""FQL query.
Example query: "SELECT affiliations FROM user WHERE uid = me()"
"""
args = args or {}
if self.access_token:
if post_args is not None:
post_args["access_token"] = self.access_token
else:
args["access_token"] = self.access_token
post_data = None if post_args is None else urllib.urlencode(post_args)
"""Check if query is a dict and
use the multiquery method
else use single query
"""
if not isinstance(query, basestring):
args["queries"] = query
fql_method = 'fql.multiquery'
else:
args["query"] = query
fql_method = 'fql.query'
args["format"] = "json"
try:
file = urllib2.urlopen("https://api.facebook.com/method/" +
fql_method + "?" + urllib.urlencode(args),
post_data, timeout=self.timeout)
except TypeError:
# Timeout support for Python <2.6
if self.timeout:
socket.setdefaulttimeout(self.timeout)
file = urllib2.urlopen("https://api.facebook.com/method/" +
fql_method + "?" + urllib.urlencode(args),
post_data)
try:
content = file.read()
response = _parse_json(content)
#Return a list if success, return a dictionary if failed
if type(response) is dict and "error_code" in response:
raise GraphAPIError(response)
except Exception, e:
raise e
finally:
file.close()
return response
def extend_access_token(self, app_id, app_secret):
"""
Extends the expiration time of a valid OAuth access token. See
<https://developers.facebook.com/roadmap/offline-access-removal/
#extend_token>
"""
args = {
"client_id": app_id,
"client_secret": app_secret,
"grant_type": "fb_exchange_token",
"fb_exchange_token": self.access_token,
}
response = urllib.urlopen("https://graph.facebook.com/oauth/"
"access_token?" +
urllib.urlencode(args)).read()
query_str = parse_qs(response)
if "access_token" in query_str:
result = {"access_token": query_str["access_token"][0]}
if "expires" in query_str:
result["expires"] = query_str["expires"][0]
return result
else:
response = json.loads(response)
raise GraphAPIError(response)
class GraphAPIError(Exception):
def __init__(self, result):
#Exception.__init__(self, message)
#self.type = type
self.result = result
try:
self.type = result["error_code"]
except:
self.type = ""
# OAuth 2.0 Draft 10
try:
self.message = result["error_description"]
except:
# OAuth 2.0 Draft 00
try:
self.message = result["error"]["message"]
except:
# REST server style
try:
self.message = result["error_msg"]
except:
self.message = result
Exception.__init__(self, self.message)
def get_user_from_cookie(cookies, app_id, app_secret):
"""Parses the cookie set by the official Facebook JavaScript SDK.
cookies should be a dictionary-like object mapping cookie names to
cookie values.
If the user is logged in via Facebook, we return a dictionary with
the keys "uid" and "access_token". The former is the user's
Facebook ID, and the latter can be used to make authenticated
requests to the Graph API. If the user is not logged in, we
return None.
Download the official Facebook JavaScript SDK at
http://github.com/facebook/connect-js/. Read more about Facebook
authentication at
http://developers.facebook.com/docs/authentication/.
"""
cookie = cookies.get("fbsr_" + app_id, "")
if not cookie:
return None
parsed_request = parse_signed_request(cookie, app_secret)
if not parsed_request:
return None
try:
result = get_access_token_from_code(parsed_request["code"], "",
app_id, app_secret)
except GraphAPIError:
return None
result["uid"] = parsed_request["user_id"]
return result
def parse_signed_request(signed_request, app_secret):
""" Return dictionary with signed request data.
We return a dictionary containing the information in the
signed_request. This includes a user_id if the user has authorised
your application, as well as any information requested.
If the signed_request is malformed or corrupted, False is returned.
"""
try:
encoded_sig, payload = map(str, signed_request.split('.', 1))
sig = base64.urlsafe_b64decode(encoded_sig + "=" *
((4 - len(encoded_sig) % 4) % 4))
data = base64.urlsafe_b64decode(payload + "=" *
((4 - len(payload) % 4) % 4))
    except (IndexError, ValueError):
        # Signed request was malformed (unpacking a bad split raises
        # ValueError, not IndexError).
        return False
except TypeError:
# Signed request had a corrupted payload.
return False
data = _parse_json(data)
if data.get('algorithm', '').upper() != 'HMAC-SHA256':
return False
# HMAC can only handle ascii (byte) strings
# http://bugs.python.org/issue5285
app_secret = app_secret.encode('ascii')
payload = payload.encode('ascii')
expected_sig = hmac.new(app_secret,
msg=payload,
digestmod=hashlib.sha256).digest()
if sig != expected_sig:
return False
return data
def auth_url(app_id, canvas_url, perms=None, **kwargs):
    """Build the URL for the Facebook OAuth dialog.
    perms, if given, is a list of permission names that is joined into
    the 'scope' parameter.
    """
    url = "https://www.facebook.com/dialog/oauth?"
kvps = {'client_id': app_id, 'redirect_uri': canvas_url}
if perms:
kvps['scope'] = ",".join(perms)
kvps.update(kwargs)
return url + urllib.urlencode(kvps)
def get_access_token_from_code(code, redirect_uri, app_id, app_secret):
"""Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable).
"""
args = {
"code": code,
"redirect_uri": redirect_uri,
"client_id": app_id,
"client_secret": app_secret,
}
# We would use GraphAPI.request() here, except for that the fact
# that the response is a key-value pair, and not JSON.
response = urllib.urlopen("https://graph.facebook.com/oauth/access_token" +
"?" + urllib.urlencode(args)).read()
query_str = parse_qs(response)
if "access_token" in query_str:
result = {"access_token": query_str["access_token"][0]}
if "expires" in query_str:
result["expires"] = query_str["expires"][0]
return result
else:
response = json.loads(response)
raise GraphAPIError(response)
def get_app_access_token(app_id, app_secret):
"""Get the access_token for the app.
This token can be used for insights and creating test users.
app_id = retrieved from the developer page
app_secret = retrieved from the developer page
Returns the application access_token.
"""
# Get an app access token
args = {'grant_type': 'client_credentials',
'client_id': app_id,
'client_secret': app_secret}
file = urllib2.urlopen("https://graph.facebook.com/oauth/access_token?" +
urllib.urlencode(args))
try:
result = file.read().split("=")[1]
finally:
file.close()
return result
| Agnishom/ascii-art-007 | facebook.py | Python | mit | 20,087 |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import threading
from typing import Optional, Tuple
from pyqrllib.pyqrllib import bin2hstr
from pyqryptonight.pyqryptonight import StringToUInt256, UInt256ToString
from qrl.core import config, BlockHeader
from qrl.core.AddressState import AddressState
from qrl.core.Block import Block
from qrl.core.BlockMetadata import BlockMetadata
from qrl.core.DifficultyTracker import DifficultyTracker
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.PoWValidator import PoWValidator
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.core.TransactionPool import TransactionPool
from qrl.core.misc import logger
from qrl.crypto.Qryptonight import Qryptonight
from qrl.generated import qrl_pb2, qrlstateinfo_pb2
class ChainManager:
def __init__(self, state):
self._state = state
self.tx_pool = TransactionPool(None)
self._last_block = Block.deserialize(GenesisBlock().serialize())
self.current_difficulty = StringToUInt256(str(config.user.genesis_difficulty))
self.trigger_miner = False
self.lock = threading.RLock()
@property
def height(self):
with self.lock:
if not self._last_block:
return -1
return self._last_block.block_number
@property
def last_block(self) -> Block:
with self.lock:
return self._last_block
@property
def total_coin_supply(self):
with self.lock:
return self._state.total_coin_supply
def get_block_datapoint(self, headerhash):
with self.lock:
return self._state.get_block_datapoint(headerhash)
def get_cumulative_difficulty(self):
with self.lock:
last_block_metadata = self._state.get_block_metadata(self._last_block.headerhash)
return last_block_metadata.cumulative_difficulty
def get_block_by_number(self, block_number) -> Optional[Block]:
with self.lock:
return self._state.get_block_by_number(block_number)
def get_block_header_hash_by_number(self, block_number) -> Optional[bytes]:
with self.lock:
return self._state.get_block_header_hash_by_number(block_number)
def get_block(self, header_hash: bytes) -> Optional[Block]:
with self.lock:
return self._state.get_block(header_hash)
def get_address_balance(self, address: bytes) -> int:
with self.lock:
return self._state.get_address_balance(address)
def get_address_is_used(self, address: bytes) -> bool:
with self.lock:
return self._state.get_address_is_used(address)
def get_address_state(self, address: bytes) -> AddressState:
with self.lock:
return self._state.get_address_state(address)
def get_all_address_state(self):
with self.lock:
return self._state.get_all_address_state()
def get_tx_metadata(self, transaction_hash) -> list:
with self.lock:
return self._state.get_tx_metadata(transaction_hash)
def get_last_transactions(self):
with self.lock:
return self._state.get_last_txs()
def get_unconfirmed_transaction(self, transaction_hash) -> list:
with self.lock:
for tx_set in self.tx_pool.transactions:
tx = tx_set[1].transaction
if tx.txhash == transaction_hash:
return [tx, tx_set[1].timestamp]
if transaction_hash in self.tx_pool.pending_tx_pool_hash:
for tx_set in self.tx_pool.pending_tx_pool:
tx = tx_set[1].transaction
if tx.txhash == transaction_hash:
return [tx, tx_set[1].timestamp]
return []
def get_block_metadata(self, header_hash: bytes) -> Optional[BlockMetadata]:
with self.lock:
return self._state.get_block_metadata(header_hash)
def get_blockheader_and_metadata(self, block_number=0) -> Tuple:
with self.lock:
block_number = block_number or self.height # if both are non-zero, then block_number takes priority
result = (None, None)
block = self.get_block_by_number(block_number)
if block:
blockheader = block.blockheader
blockmetadata = self.get_block_metadata(blockheader.headerhash)
result = (blockheader, blockmetadata)
return result
def get_block_to_mine(self, miner, wallet_address) -> list:
with miner.lock: # Trying to acquire miner.lock to make sure pre_block_logic is not running
with self.lock:
last_block = self.last_block
last_block_metadata = self.get_block_metadata(last_block.headerhash)
return miner.get_block_to_mine(wallet_address,
self.tx_pool,
last_block,
last_block_metadata.block_difficulty)
def get_measurement(self, block_timestamp, parent_headerhash, parent_metadata: BlockMetadata):
with self.lock:
return self._state.get_measurement(block_timestamp, parent_headerhash, parent_metadata)
def get_block_size_limit(self, block: Block):
with self.lock:
return self._state.get_block_size_limit(block)
def get_block_is_duplicate(self, block: Block) -> bool:
with self.lock:
return self._state.get_block(block.headerhash) is not None
def validate_mining_nonce(self, blockheader: BlockHeader, enable_logging=True):
with self.lock:
parent_metadata = self.get_block_metadata(blockheader.prev_headerhash)
parent_block = self._state.get_block(blockheader.prev_headerhash)
measurement = self.get_measurement(blockheader.timestamp, blockheader.prev_headerhash, parent_metadata)
diff, target = DifficultyTracker.get(
measurement=measurement,
parent_difficulty=parent_metadata.block_difficulty)
if enable_logging:
logger.debug('-----------------START--------------------')
logger.debug('Validate #%s', blockheader.block_number)
logger.debug('block.timestamp %s', blockheader.timestamp)
logger.debug('parent_block.timestamp %s', parent_block.timestamp)
logger.debug('parent_block.difficulty %s', UInt256ToString(parent_metadata.block_difficulty))
logger.debug('diff %s', UInt256ToString(diff))
logger.debug('target %s', bin2hstr(target))
logger.debug('-------------------END--------------------')
if not PoWValidator().verify_input(blockheader.mining_blob, target):
if enable_logging:
logger.warning("PoW verification failed")
qn = Qryptonight()
tmp_hash = qn.hash(blockheader.mining_blob)
logger.warning("{}".format(bin2hstr(tmp_hash)))
logger.debug('%s', blockheader.to_json())
return False
return True
def get_headerhashes(self, start_blocknumber):
with self.lock:
start_blocknumber = max(0, start_blocknumber)
end_blocknumber = min(self._last_block.block_number,
start_blocknumber + 2 * config.dev.reorg_limit)
total_expected_headerhash = end_blocknumber - start_blocknumber + 1
node_header_hash = qrl_pb2.NodeHeaderHash()
node_header_hash.block_number = start_blocknumber
block = self._state.get_block_by_number(end_blocknumber)
block_headerhash = block.headerhash
node_header_hash.headerhashes.append(block_headerhash)
end_blocknumber -= 1
while end_blocknumber >= start_blocknumber:
block_metadata = self._state.get_block_metadata(block_headerhash)
for headerhash in block_metadata.last_N_headerhashes[-1::-1]:
node_header_hash.headerhashes.append(headerhash)
end_blocknumber -= len(block_metadata.last_N_headerhashes)
if len(block_metadata.last_N_headerhashes) == 0:
break
block_headerhash = block_metadata.last_N_headerhashes[0]
node_header_hash.headerhashes[:] = node_header_hash.headerhashes[-1::-1]
del node_header_hash.headerhashes[:len(node_header_hash.headerhashes) - total_expected_headerhash]
return node_header_hash
def set_broadcast_tx(self, broadcast_tx):
with self.lock:
self.tx_pool.set_broadcast_tx(broadcast_tx)
def load(self, genesis_block):
# load() has the following tasks:
# Write Genesis Block into State immediately
# Register block_number <-> blockhash mapping
# Calculate difficulty Metadata for Genesis Block
# Generate AddressStates from Genesis Block balances
# Apply Genesis Block's transactions to the state
# Detect if we are forked from genesis block and if so initiate recovery.
height = self._state.get_mainchain_height()
if height == -1:
self._state.put_block(genesis_block, None)
block_number_mapping = qrl_pb2.BlockNumberMapping(headerhash=genesis_block.headerhash,
prev_headerhash=genesis_block.prev_headerhash)
self._state.put_block_number_mapping(genesis_block.block_number, block_number_mapping, None)
parent_difficulty = StringToUInt256(str(config.user.genesis_difficulty))
self.current_difficulty, _ = DifficultyTracker.get(
measurement=config.dev.mining_setpoint_blocktime,
parent_difficulty=parent_difficulty)
block_metadata = BlockMetadata.create()
block_metadata.set_block_difficulty(self.current_difficulty)
block_metadata.set_cumulative_difficulty(self.current_difficulty)
self._state.put_block_metadata(genesis_block.headerhash, block_metadata, None)
addresses_state = dict()
for genesis_balance in GenesisBlock().genesis_balance:
bytes_addr = genesis_balance.address
addresses_state[bytes_addr] = AddressState.get_default(bytes_addr)
addresses_state[bytes_addr]._data.balance = genesis_balance.balance
for tx_idx in range(1, len(genesis_block.transactions)):
tx = Transaction.from_pbdata(genesis_block.transactions[tx_idx])
for addr in tx.addrs_to:
addresses_state[addr] = AddressState.get_default(addr)
coinbase_tx = Transaction.from_pbdata(genesis_block.transactions[0])
if not isinstance(coinbase_tx, CoinBase):
return False
addresses_state[coinbase_tx.addr_to] = AddressState.get_default(coinbase_tx.addr_to)
if not coinbase_tx.validate_extended(genesis_block.block_number):
return False
coinbase_tx.apply_state_changes(addresses_state)
for tx_idx in range(1, len(genesis_block.transactions)):
tx = Transaction.from_pbdata(genesis_block.transactions[tx_idx])
tx.apply_state_changes(addresses_state)
self._state.put_addresses_state(addresses_state)
self._state.update_tx_metadata(genesis_block, None)
self._state.update_mainchain_height(0, None)
else:
self._last_block = self.get_block_by_number(height)
self.current_difficulty = self._state.get_block_metadata(self._last_block.headerhash).block_difficulty
fork_state = self._state.get_fork_state()
if fork_state:
block = self._state.get_block(fork_state.initiator_headerhash)
self._fork_recovery(block, fork_state)
def _apply_block(self, block: Block, batch) -> bool:
address_set = self._state.prepare_address_list(block) # Prepare list for current block
addresses_state = self._state.get_state_mainchain(address_set)
if not block.apply_state_changes(addresses_state):
return False
self._state.put_addresses_state(addresses_state, batch)
return True
def _update_chainstate(self, block: Block, batch):
self._last_block = block
self._update_block_number_mapping(block, batch)
self.tx_pool.remove_tx_in_block_from_pool(block)
self._state.update_mainchain_height(block.block_number, batch)
self._state.update_tx_metadata(block, batch)
def _try_branch_add_block(self, block, batch, check_stale=True) -> (bool, bool):
"""
This function returns list of bool types. The first bool represent
if the block has been added successfully and the second bool
represent the fork_flag, which becomes true when a block triggered
into fork recovery.
:param block:
:param batch:
:return: [Added successfully, fork_flag]
"""
if self._last_block.headerhash == block.prev_headerhash:
if not self._apply_block(block, batch):
return False, False
self._state.put_block(block, batch)
last_block_metadata = self._state.get_block_metadata(self._last_block.headerhash)
if last_block_metadata is None:
logger.warning("Could not find log metadata for %s", bin2hstr(self._last_block.headerhash))
return False, False
last_block_difficulty = int(UInt256ToString(last_block_metadata.cumulative_difficulty))
new_block_metadata = self._add_block_metadata(block.headerhash, block.timestamp, block.prev_headerhash, batch)
new_block_difficulty = int(UInt256ToString(new_block_metadata.cumulative_difficulty))
if new_block_difficulty > last_block_difficulty:
if self._last_block.headerhash != block.prev_headerhash:
fork_state = qrlstateinfo_pb2.ForkState(initiator_headerhash=block.headerhash)
self._state.put_fork_state(fork_state, batch)
self._state.write_batch(batch)
return self._fork_recovery(block, fork_state), True
self._update_chainstate(block, batch)
if check_stale:
self.tx_pool.check_stale_txn(self._state, block.block_number)
self.trigger_miner = True
return True, False
def _remove_block_from_mainchain(self, block: Block, latest_block_number: int, batch):
addresses_set = self._state.prepare_address_list(block)
addresses_state = self._state.get_state_mainchain(addresses_set)
for tx_idx in range(len(block.transactions) - 1, -1, -1):
tx = Transaction.from_pbdata(block.transactions[tx_idx])
tx.revert_state_changes(addresses_state, self)
self.tx_pool.add_tx_from_block_to_pool(block, latest_block_number)
self._state.update_mainchain_height(block.block_number - 1, batch)
self._state.rollback_tx_metadata(block, batch)
self._state.remove_blocknumber_mapping(block.block_number, batch)
self._state.put_addresses_state(addresses_state, batch)
def _get_fork_point(self, block: Block):
tmp_block = block
hash_path = []
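        # Walk back along the alternate chain until a block that is already
        # on the mainchain is found, collecting the traversed headerhashes
        # so fork recovery can replay them.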
while True:
            if not block:
                raise Exception('[get_state] No Block Found, Initiator %s'
                                % bin2hstr(tmp_block.headerhash))
mainchain_block = self.get_block_by_number(block.block_number)
if mainchain_block and mainchain_block.headerhash == block.headerhash:
break
if block.block_number == 0:
                raise Exception('[get_state] Alternate chain genesis is different, Initiator %s'
                                % bin2hstr(tmp_block.headerhash))
hash_path.append(block.headerhash)
block = self._state.get_block(block.prev_headerhash)
return block.headerhash, hash_path
def _rollback(self, forked_header_hash: bytes, fork_state: qrlstateinfo_pb2.ForkState = None):
"""
Rollback from last block to the block just before the forked_header_hash
:param forked_header_hash:
:param fork_state:
:return:
"""
hash_path = []
while self._last_block.headerhash != forked_header_hash:
block = self._state.get_block(self._last_block.headerhash)
mainchain_block = self._state.get_block_by_number(block.block_number)
if block is None:
logger.warning("self.state.get_block(self.last_block.headerhash) returned None")
if mainchain_block is None:
logger.warning("self.get_block_by_number(block.block_number) returned None")
if block.headerhash != mainchain_block.headerhash:
break
hash_path.append(self._last_block.headerhash)
batch = self._state.batch
self._remove_block_from_mainchain(self._last_block, block.block_number, batch)
if fork_state:
fork_state.old_mainchain_hash_path.extend([self._last_block.headerhash])
self._state.put_fork_state(fork_state, batch)
self._state.write_batch(batch)
self._last_block = self._state.get_block(self._last_block.prev_headerhash)
return hash_path
def add_chain(self, hash_path: list, fork_state: qrlstateinfo_pb2.ForkState) -> bool:
"""
Add series of blocks whose headerhash mentioned into hash_path
:param hash_path:
:param fork_state:
:param batch:
:return:
"""
with self.lock:
start = 0
try:
start = hash_path.index(self._last_block.headerhash) + 1
except ValueError:
# Following condition can only be true if the fork recovery was interrupted last time
if self._last_block.headerhash in fork_state.old_mainchain_hash_path:
return False
for i in range(start, len(hash_path)):
header_hash = hash_path[i]
block = self._state.get_block(header_hash)
batch = self._state.batch
if not self._apply_block(block, batch):
return False
self._update_chainstate(block, batch)
logger.debug('Apply block #%d - [batch %d | %s]', block.block_number, i, hash_path[i])
self._state.write_batch(batch)
self._state.delete_fork_state()
return True
def _fork_recovery(self, block: Block, fork_state: qrlstateinfo_pb2.ForkState) -> bool:
logger.info("Triggered Fork Recovery")
# This condition only becomes true, when fork recovery was interrupted
if fork_state.fork_point_headerhash:
logger.info("Recovering from last fork recovery interruption")
forked_header_hash, hash_path = fork_state.fork_point_headerhash, fork_state.new_mainchain_hash_path
else:
forked_header_hash, hash_path = self._get_fork_point(block)
fork_state.fork_point_headerhash = forked_header_hash
fork_state.new_mainchain_hash_path.extend(hash_path)
self._state.put_fork_state(fork_state)
rollback_done = False
if fork_state.old_mainchain_hash_path:
b = self._state.get_block(fork_state.old_mainchain_hash_path[-1])
if b and b.prev_headerhash == fork_state.fork_point_headerhash:
rollback_done = True
if not rollback_done:
logger.info("Rolling back")
old_hash_path = self._rollback(forked_header_hash, fork_state)
else:
old_hash_path = fork_state.old_mainchain_hash_path
if not self.add_chain(hash_path[-1::-1], fork_state):
logger.warning("Fork Recovery Failed... Recovering back to old mainchain")
# If above condition is true, then it means, the node failed to add_chain
# Thus old chain state, must be retrieved
self._rollback(forked_header_hash)
self.add_chain(old_hash_path[-1::-1], fork_state) # Restores the old chain state
return False
logger.info("Fork Recovery Finished")
self.trigger_miner = True
return True
def _add_block(self, block, batch=None, check_stale=True) -> (bool, bool):
self.trigger_miner = False
block_size_limit = self.get_block_size_limit(block)
if block_size_limit and block.size > block_size_limit:
logger.info('Block Size greater than threshold limit %s > %s', block.size, block_size_limit)
return False, False
return self._try_branch_add_block(block, batch, check_stale)
def add_block(self, block: Block, check_stale=True) -> bool:
with self.lock:
if block.block_number < self.height - config.dev.reorg_limit:
logger.debug('Skipping block #%s as beyond re-org limit', block.block_number)
return False
if self.get_block_is_duplicate(block):
return False
batch = self._state.batch
block_flag, fork_flag = self._add_block(block, batch=batch, check_stale=check_stale)
if block_flag:
if not fork_flag:
self._state.write_batch(batch)
logger.info('Added Block #%s %s', block.block_number, bin2hstr(block.headerhash))
return True
return False
def _add_block_metadata(self,
headerhash,
block_timestamp,
parent_headerhash,
batch):
block_metadata = self._state.get_block_metadata(headerhash)
if not block_metadata:
block_metadata = BlockMetadata.create()
parent_metadata = self._state.get_block_metadata(parent_headerhash)
parent_block_difficulty = parent_metadata.block_difficulty
parent_cumulative_difficulty = parent_metadata.cumulative_difficulty
block_metadata.update_last_headerhashes(parent_metadata.last_N_headerhashes, parent_headerhash)
measurement = self._state.get_measurement(block_timestamp, parent_headerhash, parent_metadata)
block_difficulty, _ = DifficultyTracker.get(
measurement=measurement,
parent_difficulty=parent_block_difficulty)
block_cumulative_difficulty = StringToUInt256(str(
int(UInt256ToString(block_difficulty)) +
int(UInt256ToString(parent_cumulative_difficulty))))
block_metadata.set_block_difficulty(block_difficulty)
block_metadata.set_cumulative_difficulty(block_cumulative_difficulty)
parent_metadata.add_child_headerhash(headerhash)
self._state.put_block_metadata(parent_headerhash, parent_metadata, batch)
self._state.put_block_metadata(headerhash, block_metadata, batch)
return block_metadata
def _update_block_number_mapping(self, block, batch):
block_number_mapping = qrl_pb2.BlockNumberMapping(headerhash=block.headerhash,
prev_headerhash=block.prev_headerhash)
self._state.put_block_number_mapping(block.block_number, block_number_mapping, batch)
| jleni/QRL | src/qrl/core/ChainManager.py | Python | mit | 23,847 |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
import struct
import time
import unittest
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
hash256,
ser_uint256,
tx_from_hex,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
)
from .script_util import (
key_to_p2wpkh_script,
script_to_p2wsh_script,
)
from .util import assert_equal
WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602
# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100
# Soft-fork activation heights
DERSIG_HEIGHT = 102 # BIP 66
CLTV_HEIGHT = 111 # BIP 65
CSV_ACTIVATION_HEIGHT = 432
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
if tmpl is None:
tmpl = {}
block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
    if tmpl.get('bits') is not None:
block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
if coinbase is None:
coinbase = create_coinbase(height=tmpl['height'])
block.vtx.append(coinbase)
if txlist:
for tx in txlist:
if not hasattr(tx, 'calc_sha256'):
tx = tx_from_hex(tx)
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
"""Create a coinbase transaction.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output.
If extra_output_script is given, make a 0-value output to that
script. This is useful to pad block weight/sigops as needed. """
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = nValue * COIN
if nValue == 50:
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
if extra_output_script is not None:
coinbaseoutput2 = CTxOut()
coinbaseoutput2.nValue = 0
coinbaseoutput2.scriptPubKey = extra_output_script
coinbase.vout.append(coinbaseoutput2)
coinbase.calc_sha256()
return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can
sign for the output that is being spent.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = tx_from_hex(raw_tx)
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can sign
for the output that is being spent.
"""
psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
for _ in range(2):
for w in node.listwallets():
wrpc = node.get_wallet_rpc(w)
signed_psbt = wrpc.walletprocesspsbt(psbt)
psbt = signed_psbt['psbt']
final_psbt = node.finalizepsbt(psbt)
assert_equal(final_psbt["complete"], True)
return final_psbt['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pkscript = key_to_p2wpkh_script(pubkey)
else:
# 1-of-1 multisig
witness_script = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
pkscript = script_to_p2wsh_script(witness_script)
return pkscript.hex()
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
assert "errors" not in signed or len(["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = tx_from_hex(tx_to_witness)
tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
tx_to_witness = tx.serialize().hex()
return node.sendrawtransaction(tx_to_witness)
class TestFrameworkBlockTools(unittest.TestCase):
def test_create_coinbase(self):
height = 20
coinbase_tx = create_coinbase(height=height)
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
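    def test_bip34_height_roundtrip(self):
        # Hedged extra check (added for illustration): heights above 16 are
        # encoded as a CScriptNum push and should decode back to themselves.
        for height in (17, 500, 250000):
            assert_equal(CScriptNum.decode(script_BIP34_coinbase_height(height)), height)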
| yenliangl/bitcoin | test/functional/test_framework/blocktools.py | Python | mit | 9,688 |
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This plugin is to monitor queue lengths in Redis. Based on redis_info.py by
# Garret Heaton <powdahound at gmail.com>, hence the GPL at the top.
import collectd
from contextlib import closing, contextmanager
import socket
# Host to connect to. Override in config by specifying 'Host'.
REDIS_HOST = 'localhost'
# Port to connect on. Override in config by specifying 'Port'.
REDIS_PORT = 6379
# Verbose logging on/off. Override in config by specifying 'Verbose'.
VERBOSE_LOGGING = False
# Queue names to monitor. Override in config by specifying 'Queues'.
QUEUE_NAMES = []
def fetch_queue_lengths(queue_names):
"""Connect to Redis server and request queue lengths.
Return a dictionary from queue names to integers.
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((REDIS_HOST, REDIS_PORT))
log_verbose('Connected to Redis at %s:%s' % (REDIS_HOST, REDIS_PORT))
except socket.error, e:
collectd.error('redis_queues plugin: Error connecting to %s:%d - %r'
% (REDIS_HOST, REDIS_PORT, e))
return None
queue_lengths = {}
with closing(s) as redis_socket:
for queue_name in queue_names:
log_verbose('Requesting length of queue %s' % queue_name)
redis_socket.sendall('llen %s\r\n' % queue_name)
with closing(redis_socket.makefile('r')) as response_file:
response = response_file.readline()
if response.startswith(':'):
try:
queue_lengths[queue_name] = int(response[1:-1])
except ValueError:
log_verbose('Invalid response: %r' % response)
else:
log_verbose('Invalid response: %r' % response)
return queue_lengths
def configure_callback(conf):
"""Receive configuration block"""
global REDIS_HOST, REDIS_PORT, VERBOSE_LOGGING, QUEUE_NAMES
for node in conf.children:
if node.key == 'Host':
REDIS_HOST = node.values[0]
elif node.key == 'Port':
REDIS_PORT = int(node.values[0])
elif node.key == 'Verbose':
VERBOSE_LOGGING = bool(node.values[0])
elif node.key == 'Queues':
QUEUE_NAMES = list(node.values)
else:
collectd.warning('redis_queues plugin: Unknown config key: %s.'
% node.key)
log_verbose('Configured with host=%s, port=%s' % (REDIS_HOST, REDIS_PORT))
for queue in QUEUE_NAMES:
log_verbose('Watching queue %s' % queue)
if not QUEUE_NAMES:
log_verbose('Not watching any queues')
def read_callback():
log_verbose('Read callback called')
queue_lengths = fetch_queue_lengths(QUEUE_NAMES)
if queue_lengths is None:
# An earlier error, reported to collectd by fetch_queue_lengths
return
for queue_name, queue_length in queue_lengths.items():
log_verbose('Sending value: %s=%s' % (queue_name, queue_length))
val = collectd.Values(plugin='redis_queues')
val.type = 'gauge'
val.type_instance = queue_name
val.values = [queue_length]
val.dispatch()
def log_verbose(msg):
if not VERBOSE_LOGGING:
return
collectd.info('redis plugin [verbose]: %s' % msg)
# register callbacks
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
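# Hedged configuration sketch: the module path and queue names below are
# assumptions for illustration, not taken from this plugin's repository.
#   <Plugin python>
#     ModulePath "/usr/lib/collectd/python"
#     Import "redis_queues"
#     <Module redis_queues>
#       Host "localhost"
#       Port 6379
#       Queues "queue_high" "queue_low"
#       Verbose false
#     </Module>
#   </Plugin>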
| alphagov/govuk-puppet | modules/collectd/files/usr/lib/collectd/python/redis_queues.py | Python | mit | 4,100 |
from .DiscreteFactor import State, DiscreteFactor
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution
__all__ = ['TabularCPD',
           'DiscreteFactor',
           'JointProbabilityDistribution',
           'State'
           ]
| khalibartan/pgmpy | pgmpy/factors/discrete/__init__.py | Python | mit | 236 |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.db import transaction
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django_filters import FilterSet
from easy_select2 import Select2
from crispy_layout_mixin import form_actions, to_row
from utils import (TIPO_TELEFONE, YES_NO_CHOICES, get_medicos,
get_or_create_grupo)
from .models import Especialidade, EspecialidadeMedico, Telefone, Usuario
class EspecialidadeMedicoFilterSet(FilterSet):
class Meta:
model = EspecialidadeMedico
fields = ['especialidade']
def __init__(self, *args, **kwargs):
super(EspecialidadeMedicoFilterSet, self).__init__(*args, **kwargs)
row1 = to_row([('especialidade', 12)])
self.form.helper = FormHelper()
self.form.helper.form_method = 'GET'
self.form.helper.layout = Layout(
Fieldset(_('Pesquisar Mรฉdico'),
row1, form_actions(save_label='Filtrar'))
)
class MudarSenhaForm(forms.Form):
nova_senha = forms.CharField(
label="Nova Senha", max_length=30,
widget=forms.PasswordInput(
attrs={'class': 'form-control form-control-lg',
'name': 'senha',
'placeholder': 'Nova Senha'}))
confirmar_senha = forms.CharField(
label="Confirmar Senha", max_length=30,
widget=forms.PasswordInput(
attrs={'class': 'form-control form-control-lg',
'name': 'confirmar_senha',
'placeholder': 'Confirmar Senha'}))
class LoginForm(AuthenticationForm):
username = forms.CharField(
label="Username", max_length=30,
widget=forms.TextInput(
attrs={'class': 'form-control form-control-lg',
'name': 'username',
'placeholder': 'Usuรกrio'}))
password = forms.CharField(
label="Password", max_length=30,
widget=forms.PasswordInput(
attrs={'class': 'form-control',
'name': 'password',
'placeholder': 'Senha'}))
class UsuarioForm(ModelForm):
# Usuรกrio
password = forms.CharField(
max_length=20,
label=_('Senha'),
widget=forms.PasswordInput())
password_confirm = forms.CharField(
max_length=20,
label=_('Confirmar Senha'),
widget=forms.PasswordInput())
class Meta:
model = Usuario
fields = ['username', 'email', 'nome', 'password', 'password_confirm',
'data_nascimento', 'sexo', 'plano', 'tipo', 'cep', 'end',
'numero', 'complemento', 'bairro', 'referencia',
'primeiro_telefone', 'segundo_telefone']
widgets = {'email': forms.TextInput(
attrs={'style': 'text-transform:lowercase;'})}
def __init__(self, *args, **kwargs):
super(UsuarioForm, self).__init__(*args, **kwargs)
self.fields['primeiro_telefone'].widget.attrs['class'] = 'telefone'
self.fields['segundo_telefone'].widget.attrs['class'] = 'telefone'
def valida_igualdade(self, texto1, texto2, msg):
if texto1 != texto2:
raise ValidationError(msg)
return True
def clean(self):
if ('password' not in self.cleaned_data or
'password_confirm' not in self.cleaned_data):
raise ValidationError(_('Favor informar senhas atuais ou novas'))
msg = _('As senhas nรฃo conferem.')
self.valida_igualdade(
self.cleaned_data['password'],
self.cleaned_data['password_confirm'],
msg)
try:
validate_password(self.cleaned_data['password'])
except ValidationError as error:
raise ValidationError(error)
return self.cleaned_data
@transaction.atomic
def save(self, commit=False):
usuario = super(UsuarioForm, self).save(commit)
# Cria User
u = User.objects.create(username=usuario.username, email=usuario.email)
u.set_password(self.cleaned_data['password'])
u.is_active = True
u.groups.add(get_or_create_grupo(self.cleaned_data['tipo'].descricao))
u.save()
usuario.user = u
usuario.save()
return usuario
class UsuarioEditForm(ModelForm):
# Primeiro Telefone
primeiro_tipo = forms.ChoiceField(
widget=forms.Select(),
choices=TIPO_TELEFONE,
label=_('Tipo Telefone'))
primeiro_ddd = forms.CharField(max_length=2, label=_('DDD'))
primeiro_numero = forms.CharField(max_length=10, label=_('Nรบmero'))
primeiro_principal = forms.TypedChoiceField(
widget=forms.Select(),
label=_('Telefone Principal?'),
choices=YES_NO_CHOICES)
    # Segundo Telefone
segundo_tipo = forms.ChoiceField(
required=False,
widget=forms.Select(),
choices=TIPO_TELEFONE,
label=_('Tipo Telefone'))
segundo_ddd = forms.CharField(required=False, max_length=2, label=_('DDD'))
segundo_numero = forms.CharField(
required=False, max_length=10, label=_('Nรบmero'))
segundo_principal = forms.ChoiceField(
required=False,
widget=forms.Select(),
label=_('Telefone Principal?'),
choices=YES_NO_CHOICES)
class Meta:
model = Usuario
fields = ['username', 'email', 'nome', 'data_nascimento', 'sexo',
'plano', 'tipo', 'cep', 'end', 'numero', 'complemento',
'bairro', 'referencia', 'primeiro_telefone',
'segundo_telefone']
widgets = {'username': forms.TextInput(attrs={'readonly': 'readonly'}),
'email': forms.TextInput(
attrs={'style': 'text-transform:lowercase;'}),
}
def __init__(self, *args, **kwargs):
super(UsuarioEditForm, self).__init__(*args, **kwargs)
self.fields['primeiro_telefone'].widget.attrs['class'] = 'telefone'
self.fields['segundo_telefone'].widget.attrs['class'] = 'telefone'
def valida_igualdade(self, texto1, texto2, msg):
if texto1 != texto2:
raise ValidationError(msg)
return True
def clean_primeiro_numero(self):
cleaned_data = self.cleaned_data
telefone = Telefone()
telefone.tipo = self.data['primeiro_tipo']
telefone.ddd = self.data['primeiro_ddd']
telefone.numero = self.data['primeiro_numero']
telefone.principal = self.data['primeiro_principal']
cleaned_data['primeiro_telefone'] = telefone
return cleaned_data
def clean_segundo_numero(self):
cleaned_data = self.cleaned_data
telefone = Telefone()
telefone.tipo = self.data['segundo_tipo']
telefone.ddd = self.data['segundo_ddd']
telefone.numero = self.data['segundo_numero']
telefone.principal = self.data['segundo_principal']
cleaned_data['segundo_telefone'] = telefone
return cleaned_data
@transaction.atomic
def save(self, commit=False):
usuario = super(UsuarioEditForm, self).save(commit)
# Primeiro telefone
tel = usuario.primeiro_telefone
tel.tipo = self.data['primeiro_tipo']
tel.ddd = self.data['primeiro_ddd']
tel.numero = self.data['primeiro_numero']
tel.principal = self.data['primeiro_principal']
tel.save()
usuario.primeiro_telefone = tel
# Segundo telefone
tel = usuario.segundo_telefone
if tel:
tel.tipo = self.data['segundo_tipo']
tel.ddd = self.data['segundo_ddd']
tel.numero = self.data['segundo_numero']
tel.principal = self.data['segundo_principal']
tel.save()
usuario.segundo_telefone = tel
# User
u = usuario.user
u.email = usuario.email
u.groups.remove(u.groups.first())
u.groups.add(get_or_create_grupo(self.cleaned_data['tipo'].descricao))
u.save()
usuario.save()
return usuario
class EspecialidadeMedicoForm(ModelForm):
medico = forms.ModelChoiceField(
queryset=get_medicos(),
widget=Select2(select2attrs={'width': '535px'}))
especialidade = forms.ModelChoiceField(
queryset=Especialidade.objects.all(),
widget=Select2(select2attrs={'width': '535px'}))
class Meta:
model = EspecialidadeMedico
fields = ['especialidade', 'medico']
| eduardoedson/scp | usuarios/forms.py | Python | mit | 8,823 |
#! python3
"""
GUI for Ultrasonic Temperature Controller
Copyright (c) 2015 by Stefan Lehmann
"""
import os
import datetime
import logging
import json
import serial
from qtpy.QtWidgets import QAction, QDialog, QMainWindow, QMessageBox, \
QDockWidget, QLabel, QFileDialog, QApplication
from qtpy.QtGui import QIcon
from qtpy.QtCore import QSettings, QCoreApplication, Qt, QThread, \
Signal
from serial.serialutil import SerialException
from jsonwatch.jsonitem import JsonItem
from jsonwatch.jsonnode import JsonNode
from jsonwatchqt.logger import LoggingWidget
from pyqtconfig.config import QSettingsManager
from jsonwatchqt.plotsettings import PlotSettingsWidget
from jsonwatchqt.objectexplorer import ObjectExplorer
from jsonwatchqt.plotwidget import PlotWidget
from jsonwatchqt.serialdialog import SerialDialog, PORT_SETTING, \
BAUDRATE_SETTING
from jsonwatchqt.utilities import critical, pixmap
from jsonwatchqt.recorder import RecordWidget
from jsonwatchqt.csvsettings import CSVSettingsDialog, DECIMAL_SETTING, \
SEPARATOR_SETTING
logger = logging.getLogger("jsonwatchqt.mainwindow")
WINDOWSTATE_SETTING = "mainwindow/windowstate"
GEOMETRY_SETTING = "mainwindow/geometry"
FILENAME_SETTING = "mainwindow/filename"
def strip(s):
return s.strip()
def utf8_to_bytearray(x):
return bytearray(x, 'utf-8')
def bytearray_to_utf8(x):
return x.decode('utf-8')
def set_default_settings(settings: QSettingsManager):
settings.set_defaults({
DECIMAL_SETTING: ',',
SEPARATOR_SETTING: ';'
})
class SerialWorker(QThread):
data_received = Signal(datetime.datetime, str)
def __init__(self, ser: serial.Serial, parent=None):
super().__init__(parent)
self.serial = ser
self._quit = False
def run(self):
while not self._quit:
try:
if self.serial.isOpen() and self.serial.inWaiting():
self.data_received.emit(
datetime.datetime.now(),
strip(bytearray_to_utf8(self.serial.readline()))
)
except SerialException:
pass
def quit(self):
self._quit = True
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.recording_enabled = False
self.serial = serial.Serial()
self.rootnode = JsonNode('')
self._connected = False
self._dirty = False
self._filename = None
# settings
self.settings = QSettingsManager()
set_default_settings(self.settings)
# Controller Settings
self.settingsDialog = None
# object explorer
self.objectexplorer = ObjectExplorer(self.rootnode, self)
self.objectexplorer.nodevalue_changed.connect(self.send_serialdata)
self.objectexplorer.nodeproperty_changed.connect(self.set_dirty)
self.objectexplorerDockWidget = QDockWidget(self.tr("object explorer"),
self)
self.objectexplorerDockWidget.setObjectName(
"objectexplorer_dockwidget")
self.objectexplorerDockWidget.setWidget(self.objectexplorer)
# plot widget
self.plot = PlotWidget(self.rootnode, self.settings, self)
# plot settings
self.plotsettings = PlotSettingsWidget(self.settings, self.plot, self)
self.plotsettingsDockWidget = QDockWidget(self.tr("plot settings"),
self)
self.plotsettingsDockWidget.setObjectName("plotsettings_dockwidget")
self.plotsettingsDockWidget.setWidget(self.plotsettings)
# log widget
self.loggingWidget = LoggingWidget(self)
self.loggingDockWidget = QDockWidget(self.tr("logger"), self)
self.loggingDockWidget.setObjectName("logging_dockwidget")
self.loggingDockWidget.setWidget(self.loggingWidget)
# record widget
self.recordWidget = RecordWidget(self.rootnode, self)
self.recordDockWidget = QDockWidget(self.tr("data recording"), self)
self.recordDockWidget.setObjectName("record_dockwidget")
self.recordDockWidget.setWidget(self.recordWidget)
# actions and menus
self._init_actions()
self._init_menus()
# statusbar
statusbar = self.statusBar()
statusbar.setVisible(True)
self.connectionstateLabel = QLabel(self.tr("Not connected"))
statusbar.addPermanentWidget(self.connectionstateLabel)
statusbar.showMessage(self.tr("Ready"))
# layout
self.setCentralWidget(self.plot)
self.addDockWidget(Qt.LeftDockWidgetArea,
self.objectexplorerDockWidget)
self.addDockWidget(Qt.LeftDockWidgetArea, self.plotsettingsDockWidget)
self.addDockWidget(Qt.BottomDockWidgetArea, self.loggingDockWidget)
self.addDockWidget(Qt.BottomDockWidgetArea, self.recordDockWidget)
self.load_settings()
def _init_actions(self):
# Serial Dialog
self.serialdlgAction = QAction(self.tr("Serial Settings..."), self)
self.serialdlgAction.setShortcut("F6")
self.serialdlgAction.setIcon(QIcon(pixmap("configure.png")))
self.serialdlgAction.triggered.connect(self.show_serialdlg)
# Connect
self.connectAction = QAction(self.tr("Connect"), self)
self.connectAction.setShortcut("F5")
self.connectAction.setIcon(QIcon(pixmap("network-connect-3.png")))
self.connectAction.triggered.connect(self.toggle_connect)
# Quit
self.quitAction = QAction(self.tr("Quit"), self)
self.quitAction.setShortcut("Alt+F4")
self.quitAction.setIcon(QIcon(pixmap("window-close-3.png")))
self.quitAction.triggered.connect(self.close)
# Save Config as
self.saveasAction = QAction(self.tr("Save as..."), self)
self.saveasAction.setShortcut("Ctrl+Shift+S")
self.saveasAction.setIcon(QIcon(pixmap("document-save-as-5.png")))
self.saveasAction.triggered.connect(self.show_savecfg_dlg)
# Save file
self.saveAction = QAction(self.tr("Save"), self)
self.saveAction.setShortcut("Ctrl+S")
self.saveAction.setIcon(QIcon(pixmap("document-save-5.png")))
self.saveAction.triggered.connect(self.save_file)
# Load file
self.loadAction = QAction(self.tr("Open..."), self)
self.loadAction.setShortcut("Ctrl+O")
self.loadAction.setIcon(QIcon(pixmap("document-open-7.png")))
self.loadAction.triggered.connect(self.show_opencfg_dlg)
# New
self.newAction = QAction(self.tr("New"), self)
self.newAction.setShortcut("Ctrl+N")
self.newAction.setIcon(QIcon(pixmap("document-new-6.png")))
self.newAction.triggered.connect(self.new)
# start recording
self.startrecordingAction = QAction(self.tr("Start recording"), self)
self.startrecordingAction.setShortcut("F9")
self.startrecordingAction.setIcon(QIcon(pixmap("media-record-6.png")))
self.startrecordingAction.triggered.connect(self.start_recording)
# stop recording
self.stoprecordingAction = QAction(self.tr("Stop recording"), self)
self.stoprecordingAction.setShortcut("F10")
self.stoprecordingAction.setIcon(QIcon(pixmap("media-playback-stop-8.png")))
self.stoprecordingAction.setEnabled(False)
self.stoprecordingAction.triggered.connect(self.stop_recording)
# clear record
self.clearrecordAction = QAction(self.tr("Clear"), self)
self.clearrecordAction.setIcon(QIcon(pixmap("editclear.png")))
self.clearrecordAction.triggered.connect(self.clear_record)
# export record
self.exportcsvAction = QAction(self.tr("Export to csv..."), self)
self.exportcsvAction.setIcon(QIcon(pixmap("text_csv.png")))
self.exportcsvAction.triggered.connect(self.export_csv)
# show record settings
self.recordsettingsAction = QAction(self.tr("Settings..."), self)
self.recordsettingsAction.setIcon(QIcon(pixmap("configure.png")))
self.recordsettingsAction.triggered.connect(self.show_recordsettings)
# Info
self.infoAction = QAction(self.tr("Info"), self)
self.infoAction.setShortcut("F1")
self.infoAction.triggered.connect(self.show_info)
def _init_menus(self):
# file menu
self.fileMenu = self.menuBar().addMenu(self.tr("File"))
self.fileMenu.addAction(self.newAction)
self.fileMenu.addAction(self.loadAction)
self.fileMenu.addAction(self.saveAction)
self.fileMenu.addAction(self.saveasAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.connectAction)
self.fileMenu.addAction(self.serialdlgAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAction)
# view menu
self.viewMenu = self.menuBar().addMenu(self.tr("View"))
self.viewMenu.addAction(
self.objectexplorerDockWidget.toggleViewAction())
self.viewMenu.addAction(self.plotsettingsDockWidget.toggleViewAction())
self.viewMenu.addAction(self.loggingDockWidget.toggleViewAction())
self.viewMenu.addAction(self.recordDockWidget.toggleViewAction())
# record menu
self.recordMenu = self.menuBar().addMenu(self.tr("Record"))
self.recordMenu.addAction(self.startrecordingAction)
self.recordMenu.addAction(self.stoprecordingAction)
self.recordMenu.addAction(self.exportcsvAction)
self.recordMenu.addSeparator()
self.recordMenu.addAction(self.clearrecordAction)
self.recordMenu.addSeparator()
self.recordMenu.addAction(self.recordsettingsAction)
# info menu
self.menuBar().addAction(self.infoAction)
def show_info(self):
QMessageBox.about(
self, QApplication.applicationName(),
"%s %s\n"
"Copyright (c) by %s" %
(
QCoreApplication.applicationName(),
QCoreApplication.applicationVersion(),
QCoreApplication.organizationName(),
)
)
def load_file(self, filename):
old_filename = self.filename if self.filename != filename else None
self.filename = filename
try:
with open(filename, 'rb') as f:
try:
self.objectexplorer.model().beginResetModel()
self.rootnode.load(bytearray_to_utf8(f.read()))
self.objectexplorer.model().endResetModel()
except ValueError as e:
critical(self, "File '%s' is not a valid config file."
% filename)
logger.error(str(e))
if old_filename is not None:
self.load_file(old_filename)
else:
self.filename = None
except FileNotFoundError as e:
logger.error(str(e))
self.filename = None
self.objectexplorer.refresh()
def load_settings(self):
settings = QSettings()
# window geometry
try:
self.restoreGeometry(settings.value(GEOMETRY_SETTING))
        except Exception:
logger.debug("error restoring window geometry")
# window state
try:
self.restoreState(settings.value(WINDOWSTATE_SETTING))
        except Exception:
logger.debug("error restoring window state")
# filename
self.filename = settings.value(FILENAME_SETTING)
if self.filename is not None:
self.load_file(self.filename)
def save_settings(self):
settings = QSettings()
settings.setValue(WINDOWSTATE_SETTING, self.saveState())
settings.setValue(GEOMETRY_SETTING, self.saveGeometry())
settings.setValue(FILENAME_SETTING, self.filename)
def closeEvent(self, event):
if self.dirty:
res = QMessageBox.question(
self,
QCoreApplication.applicationName(),
self.tr("Save changes to file '%s'?" %
self.filename
if self.filename is not None else "unknown"),
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel
)
if res == QMessageBox.Cancel:
event.ignore()
return
elif res == QMessageBox.Yes:
self.save_file()
self.save_settings()
try:
self.worker.quit()
except AttributeError:
pass
try:
self.serial.close()
except (SerialException, AttributeError):
pass
def new(self):
self.objectexplorer.model().beginResetModel()
self.rootnode.clear()
self.objectexplorer.model().endResetModel()
def send_reset(self):
jsonstring = json.dumps({"resetpid": 1})
self.serial.write(bytearray(jsonstring, 'utf-8'))
def receive_serialdata(self, time, data):
self.loggingWidget.log_input(data)
try:
self.rootnode.from_json(data)
except ValueError as e:
logger.error(str(e))
# refresh widgets
self.objectexplorer.refresh()
self.plot.refresh(time)
if self.recording_enabled:
self.recordWidget.add_data(time, self.rootnode)
def send_serialdata(self, node):
if isinstance(node, JsonItem):
if self.serial.isOpen():
s = node.to_json()
self.serial.write(utf8_to_bytearray(s + '\n'))
self.loggingWidget.log_output(s.strip())
def show_serialdlg(self):
dlg = SerialDialog(self.settings, self)
dlg.exec_()
def toggle_connect(self):
if self.serial.isOpen():
self.disconnect()
else:
self.connect()
def connect(self):
# Load port setting
port = self.settings.get(PORT_SETTING)
baudrate = self.settings.get(BAUDRATE_SETTING)
# If no port has been selected before show serial settings dialog
if port is None:
if self.show_serialdlg() == QDialog.Rejected:
return
port = self.settings.get(PORT_SETTING)
baudrate = self.settings.get(BAUDRATE_SETTING)
# Serial connection
try:
self.serial.port = port
self.serial.baudrate = baudrate
self.serial.open()
except ValueError:
QMessageBox.critical(
self, QCoreApplication.applicationName(),
self.tr("Serial parameters e.g. baudrate, databits are out "
"of range.")
)
except SerialException:
QMessageBox.critical(
self, QCoreApplication.applicationName(),
self.tr("The device '%s' can not be found or can not be "
"configured." % port)
)
else:
self.worker = SerialWorker(self.serial, self)
self.worker.data_received.connect(self.receive_serialdata)
self.worker.start()
self.connectAction.setText(self.tr("Disconnect"))
self.connectAction.setIcon(QIcon(pixmap("network-disconnect-3.png")))
self.serialdlgAction.setEnabled(False)
self.connectionstateLabel.setText(
self.tr("Connected to %s") % port)
self._connected = True
self.objectexplorer.refresh()
def disconnect(self):
self.worker.quit()
self.serial.close()
self.connectAction.setText(self.tr("Connect"))
self.connectAction.setIcon(QIcon(pixmap("network-connect-3.png")))
self.serialdlgAction.setEnabled(True)
self.connectionstateLabel.setText(self.tr("Not connected"))
self._connected = False
self.objectexplorer.refresh()
def show_savecfg_dlg(self):
filename, _ = QFileDialog.getSaveFileName(
self, self.tr("Save configuration file..."),
directory=os.path.expanduser("~"),
filter="Json file (*.json)"
)
if filename:
self.filename = filename
self.save_file()
def save_file(self):
if self.filename is not None:
config_string = self.rootnode.dump()
with open(self.filename, 'w') as f:
f.write(config_string)
self.dirty = False
else:
self.show_savecfg_dlg()
def show_opencfg_dlg(self):
# show file dialog
filename, _ = QFileDialog.getOpenFileName(
self, self.tr("Open configuration file..."),
directory=os.path.expanduser("~"),
filter=self.tr("Json file (*.json);;All files (*.*)")
)
# load config file
if filename:
self.load_file(filename)
def refresh_window_title(self):
s = "%s %s" % (QCoreApplication.applicationName(),
QCoreApplication.applicationVersion())
if self.filename is not None:
s += " - " + self.filename
if self.dirty:
s += "*"
self.setWindowTitle(s)
def start_recording(self):
self.recording_enabled = True
self.startrecordingAction.setEnabled(False)
self.stoprecordingAction.setEnabled(True)
def stop_recording(self):
self.recording_enabled = False
self.startrecordingAction.setEnabled(True)
self.stoprecordingAction.setEnabled(False)
def export_csv(self):
filename, _ = QFileDialog.getSaveFileName(
self, QCoreApplication.applicationName(),
filter="CSV files(*.csv);;All files (*.*)"
)
if filename == "":
return
# get current dataframe and export to csv
df = self.recordWidget.dataframe
decimal = self.settings.get(DECIMAL_SETTING)
df = df.applymap(lambda x: str(x).replace(".", decimal))
df.to_csv(
filename, index_label="time",
sep=self.settings.get(SEPARATOR_SETTING)
)
def clear_record(self):
self.recordWidget.clear()
def show_recordsettings(self):
dlg = CSVSettingsDialog(self)
dlg.exec_()
# filename property
@property
def filename(self):
return self._filename
@filename.setter
def filename(self, value=""):
self._filename = value
self.refresh_window_title()
# dirty property
@property
def dirty(self):
return self._dirty
@dirty.setter
def dirty(self, value):
self._dirty = value
self.refresh_window_title()
def set_dirty(self):
self.dirty = True
# connected property
@property
def connected(self):
return self._connected
| MrLeeh/jsonwatchqt | jsonwatchqt/mainwindow.py | Python | mit | 19,071 |
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime, timedelta
import os
from flask import request
from flask import Flask
import pytz
import db
from utils import get_remote_addr, get_location_data
app = Flask(__name__)
@app.route('/yo-water/', methods=['POST', 'GET'])
def yowater():
payload = request.args if request.args else request.get_json(force=True)
username = payload.get('username')
reminder = db.reminders.find_one({'username': username})
reply_object = payload.get('reply')
if reply_object is None:
        if reminder is None:
address = get_remote_addr(request)
data = get_location_data(address)
if not data:
return 'Timezone needed'
user_data = {'created': datetime.now(pytz.utc),
'username': username}
if data.get('time_zone'):
user_data.update({'timezone': data.get('time_zone')})
db.reminders.insert(user_data)
return 'OK'
else:
reply_text = reply_object.get('text')
if reply_text == u'Can\'t right now ๐':
reminder['trigger_date'] = datetime.now(pytz.utc) + timedelta(minutes=15)
else:
reminder['step'] += 1
reminder['trigger_date'] = datetime.now(pytz.utc) + timedelta(minutes=60)
reminder['last_reply_date'] = datetime.now(pytz.utc)
db.reminders.update({'username': username},
reminder)
db.replies.insert({'username': username,
'created': datetime.now(pytz.utc),
'reply': reply_text})
return 'OK'
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0", port=int(os.environ.get("PORT", "5000")))
| YoApp/yo-water-tracker | server.py | Python | mit | 1,851 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
import views
urlpatterns = patterns('',
url(r'^pis', views.pis),
url(r'^words', views.words, { 'titles': False }),
url(r'^projects', views.projects),
url(r'^posters', views.posters),
url(r'^posterpresenters', views.posterpresenters),
url(r'^pigraph', views.pigraph),
url(r'^institutions', views.institutions),
url(r'^institution/(?P<institutionid>\d+)', views.institution),
url(r'^profile/$', views.profile),
url(r'^schedule/(?P<email>\S+)', views.schedule),
url(r'^ratemeeting/(?P<rmid>\d+)/(?P<email>\S+)', views.ratemeeting),
url(r'^submitrating/(?P<rmid>\d+)/(?P<email>\S+)', views.submitrating),
url(r'^feedback/(?P<email>\S+)', views.after),
url(r'^breakouts', views.breakouts),
url(r'^breakout/(?P<bid>\d+)', views.breakout),
url(r'^about', views.about),
url(r'^buginfo', views.buginfo),
url(r'^allrms', views.allrms),
url(r'^allratings', views.allratings),
url(r'^login', views.login),
url(r'^logout', views.logout),
url(r'^edit_home_page', views.edit_home_page),
url(r'^pi/(?P<userid>\d+)', views.pi), # , name = 'pi'),
url(r'^pi/(?P<email>\S+)', views.piEmail), # , name = 'pi'),
url(r'^project/(?P<abstractid>\S+)', views.project, name = 'project'),
url(r'^scope=(?P<scope>\w+)/(?P<url>.+)$', views.set_scope),
url(r'^active=(?P<active>\d)/(?P<url>.+)$', views.set_active),
url(r'^admin/', include(admin.site.urls)),
(r'', include('django_browserid.urls')),
url(r'^$', views.index, name = 'index'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| ctames/conference-host | webApp/urls.py | Python | mit | 1,850 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from behave import *
@step('I share first element in the history list')
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
history = context.browser.find_element_by_id("HistoryPopup")
entries = history.find_elements_by_xpath('.//li[not(@data-clone-template)]')
assert len(entries) > 0, "There are no entries in the history"
item = entries[0]
item.find_elements_by_xpath('.//*[@data-share-item]')[0].click()
@then('the json to share is shown with url "{url}" and contains the following headers')
def step_impl(context, url):
# Wait for modal to appear
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ShareRequestForm')))
output = context.browser.execute_script("return restman.ui.editors.get('#ShareRequestEditor').getValue();")
snippet = json.loads(output)
assert url == snippet["url"], "URL: \"{}\" not in output.\nOutput: {}".format(value, output)
for row in context.table:
assert row['key'] in snippet['headers'], "Header {} is not in output".format(row['key'])
        assert row['value'] == snippet['headers'][row['key']], "Header value is not correct. Expected: {}; Actual: {}".format(row['value'], snippet['headers'][row['key']])
@step('I click on import request')
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
# Click on import
context.browser.find_element_by_id('ImportHistory').click()
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ImportRequestForm')))
@step('I write a shared request for "{url}"')
def step_impl(context, url):
req = json.dumps({
"method": "POST",
"url": url,
"headers": {
"Content-Type": "application/json",
"X-Test-Header": "shared_request"
},
"body": {
"type": "form",
"content": {
"SomeKey": "SomeValue11233",
"SomeOtherKey": "SomeOtherValue019",
}
}
})
context.browser.execute_script("return restman.ui.editors.setValue('#ImportRequestEditor', atob('{}'));".format(base64.b64encode(req)))
@step('I click on load import request')
def step_impl(context):
# Import request
context.browser.find_element_by_xpath("//*[@id='ImportRequestForm']//input[@value='Import']").click()
| jsargiot/restman | tests/steps/share.py | Python | mit | 2,709 |
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re
from fabric.api import local, warn
from fabric.colors import green, red
if __name__ == '__main__':
# Kept some files for backwards compatibility. If support is dropped,
# remove it here
deprecated_files = '*utils_email*,*utils_log*'
local('flake8 --ignore=E126 --ignore=W391 --statistics'
' --exclude=submodules,migrations,build .')
local('coverage run --source="django_libs" manage.py test -v 2'
' --traceback --failfast --settings=django_libs.tests.settings'
' --pattern="*_tests.py"')
local('coverage html -d coverage'
' --omit="*__init__*,*/settings/*,*/migrations/*,*/tests/*,'
'*admin*,{}"'.format(deprecated_files))
total_line = local('grep -n pc_cov coverage/index.html', capture=True)
percentage = float(re.findall(r'(\d+)%', total_line)[-1])
if percentage < 100:
warn(red('Coverage is {0}%'.format(percentage)))
else:
print(green('Coverage is {0}%'.format(percentage)))
| bitmazk/django-libs | runtests.py | Python | mit | 1,182 |
from baroque.entities.event import Event
class EventCounter:
"""A counter of events."""
def __init__(self):
self.events_count = 0
self.events_count_by_type = dict()
def increment_counting(self, event):
"""Counts an event
Args:
event (:obj:`baroque.entities.event.Event`): the event to be counted
"""
assert isinstance(event, Event)
self.events_count += 1
t = type(event.type)
if t in self.events_count_by_type:
self.events_count_by_type[t] += 1
else:
self.events_count_by_type[t] = 1
def count_all(self):
"""Tells how many events have been counted globally
Returns:
int
"""
return self.events_count
def count(self, eventtype):
"""Tells how many events have been counted of the specified type
Args:
eventtype (:obj:`baroque.entities.eventtype.EventType`): the type of events to be counted
Returns:
int
"""
return self.events_count_by_type.get(type(eventtype), 0)
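# Hedged usage sketch (assumes a concrete EventType instance `et` and an
# Event `ev` of that type, both built elsewhere with baroque's API):
#   counter = EventCounter()
#   counter.increment_counting(ev)
#   counter.count_all()   # -> 1
#   counter.count(et)     # -> 1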
| baroquehq/baroque | baroque/datastructures/counters.py | Python | mit | 1,120 |
import numpy as np
class Surface(object):
def __init__(self, image, edge_points3d, edge_points2d):
"""
Constructor for a surface defined by a texture image and
4 boundary points. Choose the first point as the origin
of the surface's coordinate system.
:param image: image array
:param edge_points3d: array of 3d coordinates of 4 corner points in clockwise direction
:param edge_points2d: array of 2d coordinates of 4 corner points in clockwise direction
"""
assert len(edge_points3d) == 4 and len(edge_points2d) == 4
self.image = image
self.edge_points3d = edge_points3d
self.edge_points2d = np.float32(edge_points2d) # This is required for using cv2's getPerspectiveTransform
self.normal = self._get_normal_vector()
def top_left_corner3d(self):
return self.edge_points3d[0]
def top_right_corner3d(self):
return self.edge_points3d[1]
def bottom_right_corner3d(self):
return self.edge_points3d[2]
def bottom_left_corner3d(self):
return self.edge_points3d[3]
def distance_to_point(self, point):
point_to_surface = point - self.top_left_corner3d()
distance_to_surface = self.normal.dot(point_to_surface)
return distance_to_surface
def _get_normal_vector(self):
"""
        :return: the unit normal vector of the surface. It determines the
        front side of the surface.
"""
p0 = self.edge_points3d[0]
p1 = self.edge_points3d[1]
p3 = self.edge_points3d[3]
v1 = p3 - p0
v2 = p1 - p0
normal = np.cross(v1, v2)
norm = np.linalg.norm(normal)
return normal / norm
class Polyhedron(object):
def __init__(self, surfaces):
self.surfaces = surfaces
class Space(object):
def __init__(self, models=None):
self.models = models or []
def add_model(self, model):
assert isinstance(model, Polyhedron)
self.models.append(model)
class Line2D(object):
def __init__(self, point1, point2):
"""
Using the line equation a*x + b*y + c = 0 with b >= 0
:param point1: starting point
:param point2: ending point
:return: a Line object
"""
assert len(point1) == 2 and len(point2) == 2
self.a = point2[1] - point1[1]
self.b = point1[0] - point2[0]
self.c = point1[1] * point2[0] - point1[0] * point2[1]
if self.b < 0:
self.a = -self.a
self.b = -self.b
self.c = -self.c
def is_point_on_left(self, point):
return self.a * point[0] + self.b * point[1] + self.c > 0
def is_point_on_right(self, point):
return self.a * point[0] + self.b * point[1] + self.c < 0
def is_point_on_line(self, point):
return self.a * point[0] + self.b * point[1] + self.c == 0
def get_y_from_x(self, x):
if self.b == 0:
return 0.0
return 1.0 * (-self.c - self.a * x) / self.b
def get_x_from_y(self, y):
if self.a == 0:
return 0.0
return 1.0 * (-self.c - self.b * y) / self.a
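# Hedged sanity sketch for Line2D (points are made up): the line through
# (0, 0) and (1, 1) is stored as -x + y = 0 once b >= 0 is enforced, so:
#   line = Line2D((0, 0), (1, 1))
#   line.is_point_on_left((0, 1))   # True: a*0 + b*1 + c = 1 > 0
#   line.get_y_from_x(2.0)          # 2.0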
| yangshun/cs4243-project | app/surface.py | Python | mit | 3,220 |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ContributorOrcid(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, uri=None, path=None, host=None):
"""
ContributorOrcid - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'uri': 'str',
'path': 'str',
'host': 'str'
}
self.attribute_map = {
'uri': 'uri',
'path': 'path',
'host': 'host'
}
self._uri = uri
self._path = path
self._host = host
@property
def uri(self):
"""
Gets the uri of this ContributorOrcid.
:return: The uri of this ContributorOrcid.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this ContributorOrcid.
:param uri: The uri of this ContributorOrcid.
:type: str
"""
self._uri = uri
@property
def path(self):
"""
Gets the path of this ContributorOrcid.
:return: The path of this ContributorOrcid.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this ContributorOrcid.
:param path: The path of this ContributorOrcid.
:type: str
"""
self._path = path
@property
def host(self):
"""
Gets the host of this ContributorOrcid.
:return: The host of this ContributorOrcid.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this ContributorOrcid.
:param host: The host of this ContributorOrcid.
:type: str
"""
self._host = host
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ContributorOrcid):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api/models/contributor_orcid.py | Python | mit | 3,922 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(unique=True, max_length=150)),
('slug', models.SlugField(unique=True, max_length=150)),
('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
],
options={
},
bases=(models.Model,),
),
]
| vollov/i18n-django-api | page/migrations/0001_initial.py | Python | mit | 723 |
from behave import given, when, then
from genosdb.models import User
from genosdb.exceptions import UserNotFound
# 'mongodb://localhost:27017/')
@given('a valid user with values {username}, {password}, {email}, {first_name}, {last_name}')
def step_impl(context, username, password, email, first_name, last_name):
context.base_user = User(username=username, email=email, password=password, first_name=first_name,
last_name=last_name)
@when('I add the user to the collection')
def step_impl(context):
context.user_service.save(context.base_user)
@then('I check {user_name} exists')
def step_impl(context, user_name):
user_exists = context.user_service.exists(user_name)
assert context.base_user.username == user_exists['username']
assert context.base_user.password == user_exists['password']
assert context.base_user.email == user_exists['email']
assert context.base_user.first_name == user_exists['first_name']
assert context.base_user.last_name == user_exists['last_name']
assert user_exists['_id'] is not None
@given('I update {username} {field} with {value}')
def step_impl(context, username, field, value):
user = context.user_service.exists(username)
if user is not None:
user[field] = value
context.user_service.update(user.to_json())
else:
raise UserNotFound(username, "User was not found")
@then('I check {username} {field} is {value}')
def step_impl(context, username, field, value):
user = context.user_service.exists(username)
if user is not None:
assert user[field] == value
else:
raise UserNotFound(username, "User was not found")
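# Hypothetical feature snippet these step definitions are written against
# (scenario values are illustrative, not taken from the repository):
#   Given a valid user with values jdoe, secret123, [email protected], John, Doe
#   When I add the user to the collection
#   Then I check jdoe exists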
| jonrf93/genos | dbservices/tests/functional_tests/steps/user_service_steps.py | Python | mit | 1,685 |
import unittest
from src.data_structures.mockdata import MockData
class TestMockData (unittest.TestCase):
def setUp(self):
self.data = MockData()
    def test_random_data(self):
        a_set = self.data.get_random_elements(10)
        self.assertTrue(len(a_set) == 10, "the data should have 10 elements!")
if __name__ == '__main__':
unittest.main() | alcemirsantos/algorithms-py | tests/data_stuctures/test_mockdata.py | Python | mit | 400 |
from rest_framework.filters import (
FilterSet
)
from trialscompendium.trials.models import Treatment
class TreatmentListFilter(FilterSet):
"""
Filter query list from treatment database table
"""
class Meta:
model = Treatment
fields = {'id': ['exact', 'in'],
'no_replicate': ['exact', 'in', 'gte', 'lte'],
'nitrogen_treatment': ['iexact', 'in', 'icontains'],
'phosphate_treatment': ['iexact', 'in', 'icontains'],
'tillage_practice': ['iexact', 'in', 'icontains'],
'cropping_system': ['iexact', 'in', 'icontains'],
'crops_grown': ['iexact', 'in', 'icontains'],
'farm_yard_manure': ['iexact', 'in', 'icontains'],
'farm_residue': ['iexact', 'in', 'icontains'],
}
order_by = ['tillage_practice', 'cropping_system', 'crops_grown']
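# Hedged usage sketch (endpoint path is an assumption; the query-param shape
# follows django-filter conventions for the lookups declared above):
#   GET /api/treatments/?tillage_practice__iexact=minimum&no_replicate__gte=3
# would keep Treatment rows whose tillage_practice matches case-insensitively
# and whose no_replicate is at least 3.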
| nkoech/trialscompendium | trialscompendium/trials/api/treatment/filters.py | Python | mit | 934 |
"""
WSGI config for Carkinos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Carkinos.settings")
application = get_wsgi_application()
| LeeYiFang/Carkinos | src/Carkinos/wsgi.py | Python | mit | 393 |
from io import BytesIO
from django import forms
from django.http import HttpResponse
from django.template import Context, Template
from braces.views import LoginRequiredMixin
from django.views.generic import DetailView, ListView
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from django.shortcuts import render, redirect
from django.conf import settings
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import letter, landscape
from reportlab.platypus import Spacer
from reportlab.platypus import Frame
from reportlab.platypus import Paragraph
from reportlab.platypus import PageTemplate
from reportlab.platypus import BaseDocTemplate
from environ import Env
from members.models import Member
@require_http_methods(['GET', 'POST'])
def member_list(request):
env = Env()
MEMBERS_PASSWORD = env('MEMBERS_PASSWORD')
# handle form submission
if request.POST:
pw_form = PasswordForm(request.POST)
if pw_form.is_valid() and pw_form.cleaned_data['password'] == MEMBERS_PASSWORD:
request.session['password'] = pw_form.cleaned_data['password']
return redirect('members:member_list')
messages.error(request, "The password you entered was incorrect, please try again.")
# form not being submitted, check password
if (request.session.get('password') and request.session['password'] == MEMBERS_PASSWORD):
member_list = Member.objects.all()
return render(request, 'members/member_list.html', {
'member_list': member_list,
})
# password is wrong, render form
pw_form = PasswordForm()
return render(request, 'members/members_password_form.html', {
'pw_form': pw_form,
})
class PasswordForm(forms.Form):
password = forms.CharField(max_length=20,
widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Enter Password',
}))
def build_frames(pwidth, pheight, ncols):
frames = []
for i in range(ncols):
f = Frame(x1=(i*((pwidth-30) / ncols)+15),
y1=0,
width=((pwidth-30) / ncols),
height=pheight+2,
leftPadding=15,
rightPadding=15,
topPadding=15,
bottomPadding=15,
showBoundary=True)
frames.append(f)
    frames[0].showBoundary = False
    frames[-1].showBoundary = False
return frames
def member_list_pdf(request):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="memberlist.pdf"'
buffer = BytesIO()
NCOLUMNS = 4
PAGE_WIDTH, PAGE_HEIGHT = landscape(letter)
styles = getSampleStyleSheet()
ptemplate = PageTemplate(frames=build_frames(PAGE_WIDTH, PAGE_HEIGHT, NCOLUMNS))
doc = BaseDocTemplate(
filename=buffer,
pagesize=landscape(letter),
pageTemplates=[ptemplate],
showBoundary=0,
leftMargin=inch,
rightMargin=inch,
topMargin=inch,
bottomMargin=inch,
allowSplitting=0,
title='SSIP209 Members Listing',
author='Max Shkurygin',
_pageBreakQuick=1,
encrypt=None)
template = Template("""
<font size="14"><strong>{{ member.last_name }}, {{ member.first_name }}</strong></font>
<br/>
{% if member.address or member.town %}
{{ member.address }}<br/>
{% if member.town %} {{ member.town }} NY <br/>{% endif %}
{% endif %}
{% if member.homephone %}
(Home) {{ member.homephone }}
<br/>
{% endif %}
{% if member.cellphone %}
(Cell) {{ member.cellphone }}
<br/>
{% endif %}
{% if member.email %}
Email: {{ member.email }}
<br/>
{% endif %}
{% if member.hobbies %}
<strong>My Hobbies</strong>: {{ member.hobbies }}
<br/>
{% endif %}
{% if member.canhelp %}
<strong>I can help with</strong>: {{ member.canhelp }}
<br/>
{% endif %}
{% if member.needhelp %}
<strong>I could use help with</strong>: {{ member.needhelp }}
<br/>
{% endif %}
""")
content = []
for member in Member.objects.all():
context = Context({"member": member})
p = Paragraph(template.render(context), styles["Normal"])
content.append(p)
content.append(Spacer(1, 0.3*inch))
doc.build(content)
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
| mooja/ssip3 | app/members/views.py | Python | mit | 4,530 |
import time
import multiprocessing
from flask import Flask
app = Flask(__name__)
backProc = None
def testFun():
print('Starting')
while True:
time.sleep(3)
print('looping')
time.sleep(3)
print('3 Seconds Later')
@app.route('/')
def root():
return 'Started a background process with PID ' + str(backProc.pid) + " is running: " + str(backProc.is_alive())
@app.route('/kill')
def kill():
backProc.terminate()
return 'killed: ' + str(backProc.pid)
@app.route('/kill_all')
def kill_all():
proc = multiprocessing.active_children()
for p in proc:
p.terminate()
return 'killed all'
@app.route('/active')
def active():
proc = multiprocessing.active_children()
arr = []
for p in proc:
print(p.pid)
arr.append(p.pid)
return str(arr)
@app.route('/start')
def start():
global backProc
backProc = multiprocessing.Process(target=testFun, args=(), daemon=True)
backProc.start()
return 'started: ' + str(backProc.pid)
if __name__ == '__main__':
app.run()
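# Rough manual test flow (assumes the default Flask dev server on port 5000):
#   GET /start    -> spawns the background process and reports its PID
#   GET /active   -> lists PIDs of live child processes
#   GET /kill     -> terminates the most recently started process
#   GET /kill_all -> terminates every active child process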
| wikomega/wikodemo | test.py | Python | mit | 1,073 |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class SubusersGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.user_nick = None
def getapiname(self):
return 'taobao.subusers.get'
| CooperLuan/devops.notes | taobao/top/api/rest/SubusersGetRequest.py | Python | mit | 303 |
# coding: utf-8
import unittest
from config_reader import ConfigReader
class TestConfigReader(unittest.TestCase):
def setUp(self):
self.config = ConfigReader("""
<root>
<person>
<name>ๅฑฑ็ฐ</name>
<age>15</age>
</person>
<person>
<name>ไฝ่ค</name>
<age>43</age>
</person>
</root>
""")
def test_get_names(self):
self.assertEqual(self.config.get_names(), ['ๅฑฑ็ฐ', 'ไฝ่ค'])
def test_get_ages(self):
self.assertEqual(self.config.get_ages(), ['15', '43'])
| orangain/jenkins-docker-sample | tests.py | Python | mit | 681 |
# project/server/tests/test_user.py
import datetime
import unittest
from flask_login import current_user
from base import BaseTestCase
from project.server import bcrypt
from project.server.models import User
from project.server.user.forms import LoginForm
class TestUserBlueprint(BaseTestCase):
def test_correct_login(self):
# Ensure login behaves correctly with correct credentials.
with self.client:
response = self.client.post(
"/login",
data=dict(email="[email protected]", password="admin_user"),
follow_redirects=True,
)
self.assertIn(b"Welcome", response.data)
self.assertIn(b"Logout", response.data)
self.assertIn(b"Members", response.data)
self.assertTrue(current_user.email == "[email protected]")
self.assertTrue(current_user.is_active())
self.assertEqual(response.status_code, 200)
def test_logout_behaves_correctly(self):
# Ensure logout behaves correctly - regarding the session.
with self.client:
self.client.post(
"/login",
data=dict(email="[email protected]", password="admin_user"),
follow_redirects=True,
)
response = self.client.get("/logout", follow_redirects=True)
self.assertIn(b"You were logged out. Bye!", response.data)
self.assertFalse(current_user.is_active)
def test_logout_route_requires_login(self):
# Ensure logout route requres logged in user.
response = self.client.get("/logout", follow_redirects=True)
self.assertIn(b"Please log in to access this page", response.data)
def test_member_route_requires_login(self):
# Ensure member route requres logged in user.
response = self.client.get("/members", follow_redirects=True)
self.assertIn(b"Please log in to access this page", response.data)
def test_validate_success_login_form(self):
# Ensure correct data validates.
form = LoginForm(email="[email protected]", password="admin_user")
self.assertTrue(form.validate())
def test_validate_invalid_email_format(self):
# Ensure invalid email format throws error.
form = LoginForm(email="unknown", password="example")
self.assertFalse(form.validate())
def test_get_by_id(self):
# Ensure id is correct for the current/logged in user.
with self.client:
self.client.post(
"/login",
data=dict(email="[email protected]", password="admin_user"),
follow_redirects=True,
)
self.assertTrue(current_user.id == 1)
def test_registered_on_defaults_to_datetime(self):
# Ensure that registered_on is a datetime.
with self.client:
self.client.post(
"/login",
data=dict(email="[email protected]", password="admin_user"),
follow_redirects=True,
)
user = User.query.filter_by(email="[email protected]").first()
self.assertIsInstance(user.registered_on, datetime.datetime)
def test_check_password(self):
# Ensure given password is correct after unhashing.
user = User.query.filter_by(email="[email protected]").first()
self.assertTrue(
bcrypt.check_password_hash(user.password, "admin_user")
)
self.assertFalse(bcrypt.check_password_hash(user.password, "foobar"))
def test_validate_invalid_password(self):
# Ensure user can't login when the pasword is incorrect.
with self.client:
response = self.client.post(
"/login",
data=dict(email="[email protected]", password="foo_bar"),
follow_redirects=True,
)
self.assertIn(b"Invalid email and/or password.", response.data)
def test_register_route(self):
# Ensure about route behaves correctly.
response = self.client.get("/register", follow_redirects=True)
self.assertIn(b"<h1>Register</h1>\n", response.data)
def test_user_registration(self):
# Ensure registration behaves correctlys.
with self.client:
response = self.client.post(
"/register",
data=dict(
email="[email protected]",
password="testing",
confirm="testing",
),
follow_redirects=True,
)
self.assertIn(b"Welcome", response.data)
self.assertTrue(current_user.email == "[email protected]")
self.assertTrue(current_user.is_active())
self.assertEqual(response.status_code, 200)
if __name__ == "__main__":
unittest.main()
| realpython/flask-skeleton | {{cookiecutter.app_slug}}/project/tests/test_user.py | Python | mit | 4,811 |
# Project Euler 144: count reflections of a laser beam inside the ellipse
# 4x^2 + y^2 = 100 before it escapes through the top aperture (|x| <= 0.01).
inside = lambda x, y: 4*x*x+y*y <= 100
def coll(sx, sy, dx, dy):
    # Walk along the ray (sx, sy) + m*(dx, dy) and find the wall hit by
    # refining m one binary digit at a time (m stays below 2, which covers
    # every chord for this beam length).
    m = 0
    for p in range(32):
        m2 = m + 2**(-p)
        if inside(sx + dx * m2, sy + dy * m2): m = m2
    return (sx + dx*m, sy + dy*m)
def norm(x, y):
    # Normalize a 2D vector.
    l = (x*x + y*y)**0.5
    return (x/l, y/l)
# First segment: the beam enters at (0.0, 10.1) heading to (1.4, -9.6).
sx, sy = 0, 10.1
dx, dy = 1.4, -19.7
for I in range(999):
    sx, sy = coll(sx, sy, dx, dy)
    if sy > 0 and abs(sx) <= 0.01:
        print(I)
        break
    # Unit tangent at the impact point: perpendicular to the ellipse
    # gradient (8x, 2y), i.e. proportional to (1, -4x/y).
    mx, my = norm(1, -4*sx/sy)
    # Reflect the direction about the tangent: r = 2*(d . t)*t - d.
    d = mx*dx + my*dy
    dx, dy = -dx + 2 * mx * d, -dy + 2 * my * d
| jokkebk/euler | p144.py | Python | mit | 538 |
import sys
MAX_NUM_STORED_LINES = 200
MAX_NUM_LINES = 10
LINEWIDTH = 80
class CmdText(object):
"""
Represents a command line text device. Text is split into lines
corresponding to the linewidth of the device.
"""
def __init__(self):
"""
Construct empty object.
"""
self.num_lines = 0
self.remaining_lines = MAX_NUM_LINES
self.lines = []
def insert(self, string):
"""
Insert string at the end. This always begins a new line.
"""
if (self.num_lines >= MAX_NUM_LINES):
pass
input_num_lines = num_lines(string)
#if (input_num_lines > self.remaining_lines):
# num = self.remaining_lines
#else:
# num = input_num_lines
num = input_num_lines
new_lines = get_lines(string)
self.lines += new_lines[-num:]
self.update_num_lines()
    def merge_after(self, obj):
        """
        Merge with another CmdText object by appending the input object's content.
        """
        self.lines += obj.lines
        self.update_num_lines()
    def strip_lines(self):
        """
        Remove excessive number of lines. This deletes the oldest half.
        """
        if (self.num_lines > MAX_NUM_STORED_LINES):
            # Delete the oldest half in one slice; popping by a running index
            # would skip entries as the list shrinks.
            del self.lines[:MAX_NUM_STORED_LINES // 2]
            self.update_num_lines()
def update_num_lines(self):
"""
Update the number of lines member.
"""
self.num_lines = len(self.lines)
def get_line(self, n):
"""
Return the line with index n.
"""
if n < self.num_lines:
return self.lines[n]
else:
raise IndexError("Line index out of range.")
def print_screen(self):
"""
Return MAX_NUM_LINES lines.
"""
return self.lines[-MAX_NUM_LINES:]
def __iter__(self):
"""
Iterator for CmdText object.
"""
for l in self.lines:
yield l
def __getitem__(self, ind):
return self.lines[ind]
def num_lines(string):
    """
    Return number of lines, counting wrapped lines the same way get_lines does.
    """
    line_list = string.split("\n")
    num = 0
    for l in line_list:
        num += (len(l) // LINEWIDTH + 1)
    return num
def get_lines(string):
"""
Return list of lines extracted from string.
"""
line_list = string.split('\n')
new_list = []
for l in line_list:
new_list += [l[i*LINEWIDTH:(i+1)*LINEWIDTH] for i in range(len(l) // LINEWIDTH + 1)]
return new_list
class Command(CmdText):
def __init__(self, string, rind=None):
CmdText.__init__(self)
self.insert(string)
if (rind is not None):
self.response = rind
class Response(CmdText):
def __init__(self, string, cind=None):
CmdText.__init__(self)
self.insert(string)
if (cind is not None):
self.command = cind
class TestCase(object):
"""
Base class for tests.
"""
@classmethod
def run(cls):
"""
Runs all tests (methods which begin with 'test').
"""
#print(cls)
max_len = max([len(a) for a in cls.__dict__])
for key in cls.__dict__:
if key.startswith("test"):
fill = max_len - len(key)
sys.stdout.write("Testing {} ...{} ".format(key, '.'*fill))
try:
cls.__dict__[key]()
except:
raise
else:
print("Test passed!")
print("All tests passed!")
class StaticTest(TestCase):
"""
Tests for static methods.
"""
def test_get_lines_with_empty_string():
assert get_lines("") == [""]
def test_get_lines_with_short_string():
assert len(get_lines("a"*(LINEWIDTH-1))) == 1
def test_get_lines_with_long_string():
assert len(get_lines("a"*(2*LINEWIDTH-1))) == 2
def test_get_lines_with_very_long_string():
assert len(get_lines("a"*(4*LINEWIDTH-1))) == 4
def test_get_lines_with_long_text_string():
text = "This is a test string, which should simulate real text. The command should" \
+ " correctly split this text into two lines."
LINEWIDTH = 80
correct_lines = [text[:LINEWIDTH], text[LINEWIDTH:]]
assert len(get_lines(text)) == len(text) // LINEWIDTH + 1
assert get_lines(text) == correct_lines
class CmdTextTest(object):
"""
Tests for CmdText class methods.
"""
pass | t-mertz/slurmCompanion | django-web/webcmd/cmdtext.py | Python | mit | 4,609 |
"""
Tests for a door card.
"""
import pytest
from onirim import card
from onirim import component
from onirim import core
from onirim import agent
class DoorActor(agent.Actor):
"""
"""
def __init__(self, do_open):
self._do_open = do_open
def open_door(self, content, door_card):
return self._do_open
DRAWN_CAN_NOT_OPEN = (
card.Color.red,
False,
component.Content(
undrawn_cards=[],
hand=[card.key(card.Color.blue)]),
component.Content(
undrawn_cards=[],
hand=[card.key(card.Color.blue)],
limbo=[card.door(card.Color.red)]),
)
DRAWN_DO_NOT_OPEN = (
card.Color.red,
False,
component.Content(
undrawn_cards=[],
hand=[card.key(card.Color.red)]),
component.Content(
undrawn_cards=[],
hand=[card.key(card.Color.red)],
limbo=[card.door(card.Color.red)]),
)
DRAWN_DO_OPEN = (
card.Color.red,
True,
component.Content(
undrawn_cards=[],
hand=[
card.key(card.Color.red),
card.key(card.Color.red),
card.key(card.Color.red),
]),
component.Content(
undrawn_cards=[],
discarded=[card.key(card.Color.red)],
hand=[card.key(card.Color.red), card.key(card.Color.red)],
opened=[card.door(card.Color.red)]),
)
DRAWN_DO_OPEN_2 = (
card.Color.red,
True,
component.Content(
undrawn_cards=[],
hand=[
card.key(card.Color.blue),
card.key(card.Color.red),
]),
component.Content(
undrawn_cards=[],
discarded=[card.key(card.Color.red)],
hand=[card.key(card.Color.blue)],
opened=[card.door(card.Color.red)]),
)
DRAWN_CASES = [
DRAWN_CAN_NOT_OPEN,
DRAWN_DO_NOT_OPEN,
DRAWN_DO_OPEN,
DRAWN_DO_OPEN_2,
]
@pytest.mark.parametrize(
"color, do_open, content, content_after",
DRAWN_CASES)
def test_drawn(color, do_open, content, content_after):
door_card = card.door(color)
door_card.drawn(core.Core(DoorActor(do_open), agent.Observer(), content))
assert content == content_after
| cwahbong/onirim-py | tests/test_door.py | Python | mit | 2,159 |
### This script fetches level-1 PACS imaging data, using a list generated by the
### archive (in the CSV format), attaches sky coordinates and masks to them
### (by calling the convertL1ToScanam task) and save them to disk in the correct
### format for later use by Scanamorphos.
### See important instructions below.
#######################################################
### This script is part of the Scanamorphos package.
### HCSS is free software: you can redistribute it and/or modify
### it under the terms of the GNU Lesser General Public License as
### published by the Free Software Foundation, either version 3 of
### the License, or (at your option) any later version.
#######################################################
## Import classes and definitions:
import os
from herschel.pacs.spg.phot import ConvertL1ToScanamTask
#######################################################
## local settings:
dir_root = "/pcdisk/stark/aribas/Desktop/modeling_TDs/remaps_Cha/PACS/scanamorphos/"
path = dir_root +"L1/"
### number of observations:
n_obs = 2
#######################################################
## Do a multiple target search in the archive and use the "save all results as CSV" option.
## --> ascii table 'results.csv' where lines can be edited
## (suppress unwanted observations and correct target names)
## Create the directories contained in the dir_out variables (l. 57)
## before running this script.
#######################################################
## observations:
table_obs = asciiTableReader(file=dir_root+'results_fast.csv', tableType='CSV', skipRows=1)
list_obsids = table_obs[0].data
list_names = table_obs[1].data
for i_obs in range(n_obs):
##
num_obsid = list_obsids[i_obs]
source = list_names[i_obs]
source = str.lower(str(source))
dir_out = path+source+"_processed_obsids"
# create directory if it does not exist
    if not os.path.exists(dir_out):
os.system('mkdir '+dir_out)
##
print ""
print "Downloading obsid " + `num_obsid`
obs = getObservation(num_obsid, useHsa=True, instrument="PACS", verbose=True)
###
frames = obs.level1.refs["HPPAVGR"].product.refs[0].product
convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)
###
frames = obs.level1.refs["HPPAVGB"].product.refs[0].product
convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)
### END OF SCRIPT
#######################################################
| alvaroribas/modeling_TDs | Herschel_mapmaking/scanamorphos/PACS/general_script_L1_PACS.py | Python | mit | 2,499 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup />
<ItemGroup />
</Project>
"""
@property
def filename(self):
pass
@property
def content(self):
configuration = str(self.conanfile.settings.build_type)
platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
        vsversion = str(self.conanfile.settings.compiler.version)
# there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
% (configuration, platform, vsversion + '.0')
name_multi = 'conanbuildinfo_multi.props'
name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()
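        # e.g. 'conanbuildinfo_release_x64_15.props' for a Release/x64/VS 15
        # build (illustrative; actual values come from the consumer settings)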
multi_path = os.path.join(self.output_path, name_multi)
if os.path.isfile(multi_path):
content_multi = load(multi_path)
else:
content_multi = self.template
dom = minidom.parseString(content_multi)
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', name_current)
import_group = dom.getElementsByTagName('ImportGroup')[0]
children = import_group.getElementsByTagName("Import")
for node in children:
if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
break
else:
import_group.appendChild(import_node)
content_multi = dom.toprettyxml()
content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())
vs_generator = VisualStudioGenerator(self.conanfile)
content_current = vs_generator.content
return {name_multi: content_multi, name_current: content_current}
| lasote/conan | conans/client/generators/visualstudio_multi.py | Python | mit | 2,436 |
# Scrapy settings for helloscrapy project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'helloscrapy'
SPIDER_MODULES = ['helloscrapy.spiders']
NEWSPIDER_MODULE = 'helloscrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'helloscrapy (+http://www.yourdomain.com)'
DOWNLOAD_DELAY = 3
ROBOTSTXT_OBEY = True
| orangain/helloscrapy | helloscrapy/settings.py | Python | mit | 525 |
"""
Django settings for djangoApp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r&j)3lay4i$rm44n%h)bsv_q(9ysqhl@7@aibjm2b=1)0fag9n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| reggieroby/devpack | frameworks/djangoApp/djangoApp/settings.py | Python | mit | 3,105 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['HERTZ_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ['HERTZ_DEBUG'] != 'False'
ALLOWED_HOSTS = ['*' if DEBUG else os.environ['HERTZ_HOST']]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'attendance',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hertz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hertz.wsgi.application'
# Database
if 'DATABASE_HOST' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': os.environ['POSTGRES_USER'],
'PASSWORD': os.environ['POSTGRES_PASSWORD'],
'HOST': os.environ['DATABASE_HOST'],
'PORT': 5432,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
| seccom-ufsc/hertz | hertz/settings.py | Python | mit | 3,237 |
import re
import warnings
import ctds
from .base import TestExternalDatabase
from .compat import PY3, PY36, unicode_
class TestTdsParameter(TestExternalDatabase):
def test___doc__(self):
self.assertEqual(
ctds.Parameter.__doc__,
'''\
Parameter(value, output=False)
Explicitly define a parameter for :py:meth:`.callproc`,
:py:meth:`.execute`, or :py:meth:`.executemany`. This is necessary
to indicate whether a parameter is *SQL* `OUTPUT` or `INPUT/OUTPUT`
parameter.
:param object value: The parameter's value.
:param bool output: Is the parameter an output parameter.
'''
)
def test_parameter(self):
param1 = ctds.Parameter(b'123', output=True)
self.assertEqual(param1.value, b'123')
self.assertTrue(isinstance(param1, ctds.Parameter))
param2 = ctds.Parameter(b'123')
self.assertEqual(param1.value, b'123')
self.assertEqual(type(param1), type(param2))
self.assertTrue(isinstance(param2, ctds.Parameter))
def test___repr__(self):
for parameter, expected in (
(
ctds.Parameter(b'123', output=True),
"ctds.Parameter(b'123', output=True)" if PY3 else "ctds.Parameter('123', output=True)"
),
(
ctds.Parameter(unicode_('123'), output=False),
"ctds.Parameter('123')" if PY3 else "ctds.Parameter(u'123')"
),
(
ctds.Parameter(None),
"ctds.Parameter(None)"
),
(
ctds.Parameter(ctds.SqlVarBinary(b'4321', size=10)),
"ctds.Parameter(ctds.SqlVarBinary(b'4321', size=10))"
if PY3 else
"ctds.Parameter(ctds.SqlVarBinary('4321', size=10))"
)
):
self.assertEqual(repr(parameter), expected)
def _test__cmp__(self, __cmp__, expected, oper):
cases = (
(ctds.Parameter(b'1234'), ctds.Parameter(b'123')),
(ctds.Parameter(b'123'), ctds.Parameter(b'123')),
(ctds.Parameter(b'123'), ctds.Parameter(b'123', output=True)),
(ctds.Parameter(b'123'), ctds.Parameter(b'1234')),
(ctds.Parameter(b'123'), b'123'),
(ctds.Parameter(b'123'), ctds.Parameter(123)),
(ctds.Parameter(b'123'), unicode_('123')),
(ctds.Parameter(b'123'), ctds.SqlBinary(None)),
(ctds.Parameter(b'123'), 123),
(ctds.Parameter(b'123'), None),
)
for index, args in enumerate(cases):
operation = '[{0}]: {1} {2} {3}'.format(index, repr(args[0]), oper, repr(args[1]))
if expected[index] == TypeError:
try:
__cmp__(*args)
except TypeError as ex:
regex = (
r"'{0}' not supported between instances of '[^']+' and '[^']+'".format(oper)
if not PY3 or PY36
else
r'unorderable types: \S+ {0} \S+'.format(oper)
)
self.assertTrue(re.match(regex, str(ex)), ex)
else:
self.fail('{0} did not fail as expected'.format(operation)) # pragma: nocover
else:
self.assertEqual(__cmp__(*args), expected[index], operation)
def test___cmp__eq(self):
self._test__cmp__(
lambda left, right: left == right,
(
False,
True,
True,
False,
True,
False,
not PY3,
False,
False,
False,
),
'=='
)
def test___cmp__ne(self):
self._test__cmp__(
lambda left, right: left != right,
(
True,
False,
False,
True,
False,
True,
PY3,
True,
True,
True,
),
'!='
)
def test___cmp__lt(self):
self._test__cmp__(
lambda left, right: left < right,
(
False,
False,
False,
True,
False,
TypeError if PY3 else False,
TypeError if PY3 else False,
TypeError if PY3 else False,
TypeError if PY3 else False,
TypeError if PY3 else False,
),
'<'
)
def test___cmp__le(self):
self._test__cmp__(
lambda left, right: left <= right,
(
False,
True,
True,
True,
True,
TypeError if PY3 else False,
TypeError if PY3 else True,
TypeError if PY3 else False,
TypeError if PY3 else False,
TypeError if PY3 else False,
),
'<='
)
def test___cmp__gt(self):
self._test__cmp__(
lambda left, right: left > right,
(
True,
False,
False,
False,
False,
TypeError if PY3 else True,
TypeError if PY3 else False,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
),
'>'
)
def test___cmp__ge(self):
self._test__cmp__(
lambda left, right: left >= right,
(
True,
True,
True,
False,
True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
),
'>='
)
def test_typeerror(self):
for case in (None, object(), 123, 'foobar'):
self.assertRaises(TypeError, ctds.Parameter, case, b'123')
self.assertRaises(TypeError, ctds.Parameter)
self.assertRaises(TypeError, ctds.Parameter, output=False)
for case in (None, object(), 123, 'foobar'):
self.assertRaises(TypeError, ctds.Parameter, b'123', output=case)
def test_reuse(self):
with self.connect() as connection:
with connection.cursor() as cursor:
for value in (
None,
123456,
unicode_('hello world'),
b'some bytes',
):
for output in (True, False):
parameter = ctds.Parameter(value, output=output)
for _ in range(0, 2):
# Ignore warnings generated due to output parameters
# used with result sets.
with warnings.catch_warnings(record=True):
cursor.execute(
'''
SELECT :0
''',
(parameter,)
)
self.assertEqual(
[tuple(row) for row in cursor.fetchall()],
[(value,)]
)
| zillow/ctds | tests/test_tds_parameter.py | Python | mit | 7,779 |
import copy
import pytest
from peek.line import InvalidIpAddressException, Line, InvalidStatusException
# 127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"
test_line_contents = {
'ip_address': '127.0.0.1',
'timestamp': '[01/Jan/1970:00:00:01 +0000]',
'verb': 'GET',
'path': '/',
'status': '200',
'size': '193',
'referrer': '-',
'user_agent': 'Python'
}
def get_updated_line_contents(updates=None):
test_contents = copy.deepcopy(test_line_contents)
if updates is not None:
test_contents.update(updates)
return test_contents
test_line = Line(line_contents=test_line_contents)
class TestLineInstantiation:
@pytest.mark.parametrize('expected,actual', [
('127.0.0.1', test_line.ip_address),
(1, test_line.timestamp),
('GET', test_line.verb),
('/', test_line.path),
(200, test_line.status),
(193, test_line.byte_count),
('-', test_line.referrer),
('Python', test_line.user_agent)
])
def test_retrieval(self, expected, actual):
assert expected == actual
class TestLineExceptions:
def test_passing_invalid_ip_address_throws_exception(self):
with pytest.raises(InvalidIpAddressException):
            Line(line_contents=get_updated_line_contents({'ip_address': 'foobar'}))
def test_passing_non_parseable_status_throws_exception(self):
with pytest.raises(InvalidStatusException):
Line(line_contents=get_updated_line_contents({'status': 'foobar'}))
| purrcat259/peek | tests/unit/test_line.py | Python | mit | 1,585 |
import logging
import requests
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.utils import timezone
from invitations.models import Invitation
logger = logging.getLogger('email')
sentry = logging.getLogger('sentry')
def send_invite(message):
try:
invite = Invitation.objects.get(
id=message.get('id'),
status__in=[Invitation.PENDING, Invitation.ERROR],
)
except Invitation.DoesNotExist:
sentry.error("Invitation to send not found", exc_info=True, extra={'message': message})
return
invite.status = Invitation.PROCESSING
invite.save()
context = {
'invite': invite,
'domain': Site.objects.get_current().domain,
}
subject = "[ContactOtter] Invitation to join ContactOtter from %s" % (invite.sender)
if invite.book:
subject = "[ContactOtter] Invitation to share %s's contact book" % (invite.sender)
txt = get_template('email/invitation.txt').render(context)
html = get_template('email/invitation.html').render(context)
try:
message = EmailMultiAlternatives(
subject=subject,
body=txt,
from_email="ContactOtter <[email protected]>",
to=[invite.email,],
)
message.attach_alternative(html, "text/html")
message.send()
invite.status = Invitation.SENT
invite.sent = timezone.now()
invite.save()
    except Exception:
sentry.exception('Problem sending invite', exc_info=True, extra={'invite_id': invite.id})
invite.status = Invitation.ERROR
invite.save()
| phildini/logtacts | invitations/consumers.py | Python | mit | 1,739 |
def burrows_wheeler(text):
"""Calculates the burrows wheeler transform of <text>.
returns the burrows wheeler string and the suffix array indices
The text is assumed to not contain the character $"""
text += "$"
all_permutations = []
for i in range(len(text)):
all_permutations.append((text[i:] + text[:i],i))
all_permutations.sort()
bw_l = [] # burrows wheeler as list
sa_i = [] # suffix array indices
for w,j in all_permutations:
bw_l.append(w[-1])
sa_i.append(j)
return "".join(bw_l), sa_i
| alneberg/sillymap | sillymap/burrows_wheeler.py | Python | mit | 567 |
#!/usr/bin/env python
# coding: utf-8
import os,sys
import ctypes
import numpy as np
from .hmatrix import _C_HMatrix, HMatrix
class _C_MultiHMatrix(ctypes.Structure):
"""Holder for the raw data from the C++ code."""
pass
class AbstractMultiHMatrix:
"""Common code for the two actual MultiHMatrix classes below."""
ndim = 2 # To mimic a numpy 2D array
def __init__(self, c_data: _C_MultiHMatrix, **params):
# Users should use one of the two constructors below.
self.c_data = c_data
self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data))
self.size = self.lib.nbhmats(c_data)
self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
self.hmatrices = []
for l in range(0,self.size):
c_data_hmatrix = self.lib.getHMatrix(self.c_data,l)
self.hmatrices.append(HMatrix(c_data_hmatrix,**params))
self.params = params.copy()
@classmethod
def from_coefs(cls, getcoefs, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
getcoefs: Callable
A function evaluating an array of matrices at given coordinates.
points_target: np.ndarray of shape (N, 3)
The coordinates of the target points. If points_source=None, also the coordinates of the target points
points_source: np.ndarray of shape (N, 3)
If not None; the coordinates of the source points.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
MultiHMatrix or ComplexMultiHMatrix
"""
# Set params.
cls._set_building_params(**params)
# Boilerplate code for Python/C++ interface.
_getcoefs_func_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double))
if points_source is None:
cls.lib.MultiHMatrixCreateSym.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreateSym.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getcoefs_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreateSym(points_target, points_target.shape[0], _getcoefs_func_type(getcoefs),nm)
else:
cls.lib.MultiHMatrixCreate.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreate.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getcoefs_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreate(points_target,points_target.shape[0],points_source, points_source.shape[0], _getcoefs_func_type(getcoefs),nm)
return cls(c_data, **params)
@classmethod
def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
points: np.ndarray of shape (N, 3)
The coordinates of the points.
getsubmatrix: Callable
A function evaluating the matrix in a given range.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
        MultiHMatrix or ComplexMultiHMatrix
"""
# Set params.
cls._set_building_params(**params)
# Boilerplate code for Python/C++ interface.
_getsumatrix_func_type = ctypes.CFUNCTYPE(
None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)
)
if points_source is None:
cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsumatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsumatrix_func_type(getsubmatrix),nm)
else:
cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsumatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target,points_target.shape[0],points_source, points_source.shape[0], _getsumatrix_func_type(getsubmatrix),nm)
return cls(c_data, **params)
@classmethod
def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None):
"""Put the parameters in the C++ backend."""
if epsilon is not None:
cls.lib.setepsilon.restype = None
cls.lib.setepsilon.argtypes = [ ctypes.c_double ]
cls.lib.setepsilon(epsilon)
if eta is not None:
cls.lib.seteta.restype = None
cls.lib.seteta.argtypes = [ ctypes.c_double ]
cls.lib.seteta(eta)
if minclustersize is not None:
cls.lib.setminclustersize.restype = None
cls.lib.setminclustersize.argtypes = [ ctypes.c_int ]
cls.lib.setminclustersize(minclustersize)
if maxblocksize is not None:
cls.lib.setmaxblocksize.restype = None
cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ]
cls.lib.setmaxblocksize(maxblocksize)
def __str__(self):
return f"{self.__class__.__name__}(shape={self.shape})"
def __getitem__(self, key):
# self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
# self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
# c_data_hmatrix = self.lib.getHMatrix(self.c_data,key)
# return HMatrix(c_data_hmatrix,**self.params)
return self.hmatrices[key]
def matvec(self, l , vector):
"""Matrix-vector product (interface for scipy iterative solvers)."""
assert self.shape[1] == vector.shape[0], "Matrix-vector product of matrices of wrong shapes."
# Boilerplate for Python/C++ interface
self.lib.MultiHMatrixVecProd.argtypes = [
ctypes.POINTER(_C_MultiHMatrix),
ctypes.c_int,
np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS')
]
# Initialize vector
result = np.zeros((self.shape[0],), dtype=self.dtype)
# Call C++ backend
self.lib.MultiHMatrixVecProd(self.c_data,l , vector, result)
return result
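# Minimal usage sketch (illustrative only: the kernel is made up, and the
# callback writes through the raw ctypes pointer declared in from_coefs):
#
#   points = np.random.rand(100, 3)
#   nm = 2
#   def getcoefs(i, j, coefs):
#       r = np.linalg.norm(points[i] - points[j]) + 1e-6
#       coefs[0] = 1.0 / r   # entry (i, j) of the first matrix
#       coefs[1] = r         # entry (i, j) of the second matrix
#   family = MultiHMatrix.from_coefs(getcoefs, nm, points, epsilon=1e-3)
#   y = family.matvec(0, np.ones(100))   # product with the first matrix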
class MultiHMatrix(AbstractMultiHMatrix):
"""A real-valued hierarchical matrix based on htool C++ library.
Create with HMatrix.from_coefs or HMatrix.from_submatrices.
Attributes
----------
c_data:
Pointer to the raw data used by the C++ library.
shape: Tuple[int, int]
Shape of the matrix.
nb_dense_blocks: int
Number of dense blocks in the hierarchical matrix.
nb_low_rank_blocks: int
Number of sparse blocks in the hierarchical matrix.
nb_blocks: int
Total number of blocks in the decomposition.
params: dict
The parameters that have been used to build the matrix.
"""
libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared')
if 'linux' in sys.platform:
lib = ctypes.cdll.LoadLibrary(libfile+'.so')
elif sys.platform == 'darwin':
lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
elif sys.platform == 'win32':
lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
dtype = ctypes.c_double
class ComplexMultiHMatrix(AbstractMultiHMatrix):
"""A complex-valued hierarchical matrix based on htool C++ library.
Create with ComplexHMatrix.from_coefs or ComplexHMatrix.from_submatrices.
Attributes
----------
c_data:
Pointer to the raw data used by the C++ library.
shape: Tuple[int, int]
Shape of the matrix.
nb_dense_blocks: int
Number of dense blocks in the hierarchical matrix.
nb_low_rank_blocks: int
Number of sparse blocks in the hierarchical matrix.
nb_blocks: int
Total number of blocks in the decomposition.
params: dict
The parameters that have been used to build the matrix.
"""
libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared_complex')
if 'linux' in sys.platform:
lib = ctypes.cdll.LoadLibrary(libfile+'.so')
elif sys.platform == 'darwin':
lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
elif sys.platform == 'win32':
lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
dtype = np.complex128
| PierreMarchand20/htool | interface/htool/multihmatrix.py | Python | mit | 10,354 |
import primes as py
def lcm(a, b):
    return a * b // gcd(a, b)
def gcd(a, b):
while b != 0:
(a, b) = (b, a % b)
return a
# Returns two integers x, y such that gcd(a, b) = ax + by
def egcd(a, b):
if a == 0:
return (0, 1)
else:
y, x = egcd(b % a, a)
return (x - (b // a) * y, y)
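# e.g. egcd(240, 46) == (-9, 47), since gcd(240, 46) == 2 == 240*(-9) + 46*47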
# Returns an integer x such that ax = 1(mod m)
def modInverse(a, m):
x, y = egcd(a, m)
if gcd(a, m) == 1:
return x % m
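# e.g. modInverse(3, 7) == 5, since 3 * 5 == 15 == 1 (mod 7); the function
# implicitly returns None when gcd(a, m) != 1 and no inverse exists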
# Reduces linear congruence to form x = b(mod m)
def reduceCongr(a, b, m):
gcdAB = gcd(a, b)
a /= gcdAB
b /= gcdAB
m /= gcd(gcdAB, m)
modinv = modInverse(a, m)
b *= modinv
return (1, b, m)
# Returns the incongruent solutions to the linear congruence ax = b(mod m)
def linCongr(a, b, m):
solutions = set()
if (b % gcd(a, m) == 0):
numSols = gcd(a, m)
sol = (b * egcd(a, m)[0] / numSols) % m
for i in xrange(0, numSols):
solutions.add((sol + m * i / numSols) % m)
return solutions
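# e.g. linCongr(6, 4, 8) == set([2, 6]): gcd(6, 8) == 2 divides 4, so there
# are two incongruent solutions, and 6*2 == 6*6 == 4 (mod 8)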
# Uses the Chinese Remainder Theorem to solve a system of linear congruences
def crt(congruences):
x = 0
M = 1
for i in xrange(len(congruences)):
M *= congruences[i][2]
congruences[i] = reduceCongr(congruences[i][0], congruences[i][1], congruences[i][2])
for j in xrange(len(congruences)):
m = congruences[j][2]
if gcd(m, M/m) != 1:
return None
x += congruences[j][1] * modInverse(M/m, m) * M / m
return x % M
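# e.g. crt([(1, 2, 3), (1, 3, 5), (1, 2, 7)]) == 23, the classic system
# x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7)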
# Returns the incongruent solution to any system of linear congruences
def linCongrSystem(congruences):
newCongruences = []
for i in xrange(len(congruences)):
congruences[i] = reduceCongr(congruences[i][0], congruences[i][1], congruences[i][2])
# Tests to see whether the system is solvable
for j in xrange(len(congruences)):
if congruences[i] != congruences[j]:
if (congruences[i][1] - congruences[j][1]) % gcd(congruences[i][2], congruences[j][2]) != 0:
return None
# Splits moduli into prime powers
pFactor = py.primeFactorization(congruences[i][2])
for term in pFactor:
newCongruences.append((1, congruences[i][1], term[0] ** term[1]))
# Discards redundant congruences
newCongruences = sorted(newCongruences, key=lambda x: x[2], reverse = True)
finalCongruences = []
for k in xrange(len(newCongruences)):
isRedundant = False
for l in xrange(0, k):
if newCongruences[l][2] % newCongruences[k][2] == 0:
isRedundant = True
if not isRedundant:
finalCongruences.append(newCongruences[k])
return crt(finalCongruences)
# Returns incongruents solutions to a polynomial congruence
def polyCongr(coefficients, m):
solutions = []
for i in xrange(m):
value = 0
for degree in xrange(len(coefficients)):
value += coefficients[degree] * (i ** (len(coefficients) - degree - 1))
if value % m == 0:
solutions.append(i)
return solutions
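# e.g. polyCongr([1, 0, -1], 8) == [1, 3, 5, 7], the solutions of
# x**2 - 1 = 0 (mod 8)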
| ioguntol/NumTy | numty/congruences.py | Python | mit | 3,551 |
a = a # e 4
a = 1 # 0 int
l = [a] # 0 [int]
d = {a:l} # 0 {int:[int]}
s = "abc"
c = ord(s[2].lower()[0]) # 0 int # 4 (str) -> int
l2 = [range(i) for i in d] # 0 [[int]]
y = [(a,b) for a,b in {1:'2'}.iteritems()] # 0 [(int,str)]
b = 1 # 0 int
if 0:
b = '' # 4 str
else:
b = str(b) # 4 str # 12 int
r = 0 # 0 int
if r: # 3 int
r = str(r) # 4 str # 12 int
r # 0 <int|str>
l = range(5) # 0 [int]
l2 = l[2:3] # 0 [int]
x = l2[1] # 0 int
k = 1() # 0 <unknown> # e 4
del k
k # e 0
l = [] # 0 [int]
x = 1 # 0 int
while x: # 6 int
l = [] # 4 [int]
l.append(1) # 0 [int] # 2 (int) -> None
l = [1, 2] # 0 [int]
l2 = [x for x in l] # 0 [<int|str>]
l2.append('') # 0 [<int|str>]
s = str() # 0 str
s2 = str(s) # 0 str
s3 = repr() # e 5 # 0 str
s4 = repr(s) # 0 str
x = 1 if [] else '' # 0 <int|str>
l = [1] # 0 [<int|str>]
l2 = [''] # 0 [str]
l[:] = l2 # 0 [<int|str>]
b = 1 < 2 < 3 # 0 bool
l = sorted(range(5), key=lambda x:-x) # 0 [int]
d = {} # 0 {<bool|int>:<int|str>}
d1 = {1:''} # 0 {int:str}
d.update(d1)
d[True] = 1
d # 0 {<bool|int>:<int|str>}
l = [] # 0 [int]
l1 = [] # 0 [<unknown>]
l.extend(l1)
l.append(2)
l = [] # 0 [<[str]|int>]
l1 = [[]] # 0 [[str]]
l.extend(l1)
l[0].append('') # e 0
l.append(1)
l = [] # 0 [[<int|str>]]
l2 = [1] # 0 [int]
l3 = [''] # 0 [str]
l.append(l2)
l.append(l3)
for i, s in enumerate("aoeu"): # 4 int # 7 str
pass
x = 1 # 0 int
y = x + 1.0 # 0 float
y << 1 # e 0
l = [1, 1.0] # 0 [float]
1.0 in [1] # e 0
x = `1` # 0 str
def f():
x = `1` # 4 str
d = dict(a=1) # 0 {str:int}
l = list() # 0 [<unknown>]
i = int(1) # 0 int
i = int(1.2) # 0 int
i = abs(1) # 0 int
i = abs(1.0) # 0 float
d = dict() # 0 {int:int}
d[1] = 2
d2 = dict(d) # 0 {<int|str>:<int|str>}
d2[''] = ''
d3 = dict([(1,2)]) # 0 {int:int}
d4 = dict(a=1) # 0 {str:int}
| kmod/icbd | icbd/type_analyzer/tests/basic.py | Python | mit | 1,818 |
# Copyright (c) 2016 nVentiveUX
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Application configuration"""
from django.apps import AppConfig
class ShowcaseConfig(AppConfig):
name = 'mystartupmanager.showcase'
| nVentiveUX/mystartupmanager | mystartupmanager/showcase/apps.py | Python | mit | 1,233 |
import asyncio
import email.utils
import json
import sys
from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from urllib.parse import parse_qs, unquote, urlunparse
from httptools import parse_url
from sanic.exceptions import InvalidUsage
from sanic.log import error_logger, logger
try:
from ujson import loads as json_loads
except ImportError:
if sys.version_info[:2] == (3, 5):
def json_loads(data):
# on Python 3.5 json.loads only supports str not bytes
return json.loads(data.decode())
else:
json_loads = json.loads
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
class RequestParameters(dict):
"""Hosts a dict with lists as values where get returns the first
value of the list and getlist returns the whole shebang
"""
def get(self, name, default=None):
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0]
def getlist(self, name, default=None):
"""Return the entire list"""
return super().get(name, default)
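    # For example (illustrative):
    #   args = RequestParameters({"ids": ["1", "2"]})
    #   args.get("ids")      -> "1"
    #   args.getlist("ids")  -> ["1", "2"]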
class StreamBuffer:
def __init__(self, buffer_size=100):
self._queue = asyncio.Queue(buffer_size)
async def read(self):
""" Stop reading when gets None """
payload = await self._queue.get()
self._queue.task_done()
return payload
async def put(self, payload):
await self._queue.put(payload)
def is_full(self):
return self._queue.full()
class Request(dict):
"""Properties of an HTTP request such as URL, headers, etc."""
__slots__ = (
"__weakref__",
"_cookies",
"_ip",
"_parsed_url",
"_port",
"_remote_addr",
"_socket",
"app",
"body",
"endpoint",
"headers",
"method",
"parsed_args",
"parsed_files",
"parsed_form",
"parsed_json",
"raw_url",
"stream",
"transport",
"uri_template",
"version",
)
def __init__(self, url_bytes, headers, version, method, transport):
self.raw_url = url_bytes
# TODO: Content-Encoding detection
self._parsed_url = parse_url(url_bytes)
self.app = None
self.headers = headers
self.version = version
self.method = method
self.transport = transport
# Init but do not inhale
self.body_init()
self.parsed_json = None
self.parsed_form = None
self.parsed_files = None
self.parsed_args = None
self.uri_template = None
self._cookies = None
self.stream = None
self.endpoint = None
def __repr__(self):
return "<{0}: {1} {2}>".format(
self.__class__.__name__, self.method, self.path
)
def __bool__(self):
if self.transport:
return True
return False
def body_init(self):
self.body = []
def body_push(self, data):
self.body.append(data)
def body_finish(self):
self.body = b"".join(self.body)
@property
def json(self):
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
@property
def token(self):
"""Attempt to return the auth header token.
:return: token related to request
"""
prefixes = ("Bearer", "Token")
auth_header = self.headers.get("Authorization")
if auth_header is not None:
for prefix in prefixes:
if prefix in auth_header:
return auth_header.partition(prefix)[-1].strip()
return auth_header
@property
def form(self):
if self.parsed_form is None:
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.get(
"Content-Type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(self.body.decode("utf-8"))
)
elif content_type == "multipart/form-data":
# TODO: Stream this instead of reading to/from memory
boundary = parameters["boundary"].encode("utf-8")
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def files(self):
if self.parsed_files is None:
self.form # compute form to get files
return self.parsed_files
@property
def args(self):
if self.parsed_args is None:
if self.query_string:
self.parsed_args = RequestParameters(
parse_qs(self.query_string)
)
else:
self.parsed_args = RequestParameters()
return self.parsed_args
@property
def raw_args(self):
return {k: v[0] for k, v in self.args.items()}
@property
def cookies(self):
if self._cookies is None:
cookie = self.headers.get("Cookie")
if cookie is not None:
cookies = SimpleCookie()
cookies.load(cookie)
self._cookies = {
name: cookie.value for name, cookie in cookies.items()
}
else:
self._cookies = {}
return self._cookies
@property
def ip(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._ip
@property
def port(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._port
@property
def socket(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._socket
def _get_address(self):
self._socket = self.transport.get_extra_info("peername") or (
None,
None,
)
self._ip = self._socket[0]
self._port = self._socket[1]
@property
def remote_addr(self):
"""Attempt to return the original client ip based on X-Forwarded-For.
:return: original client ip.
"""
if not hasattr(self, "_remote_addr"):
forwarded_for = self.headers.get("X-Forwarded-For", "").split(",")
remote_addrs = [
addr
for addr in [addr.strip() for addr in forwarded_for]
if addr
]
if len(remote_addrs) > 0:
self._remote_addr = remote_addrs[0]
else:
self._remote_addr = ""
return self._remote_addr
@property
def scheme(self):
if (
self.app.websocket_enabled
and self.headers.get("upgrade") == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self):
# it appears that httptools doesn't return the host
# so pull it from the headers
return self.headers.get("Host", "")
@property
def content_type(self):
return self.headers.get("Content-Type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self):
"""return matched info after resolving route"""
return self.app.router.get(self)[2]
@property
def path(self):
return self._parsed_url.path.decode("utf-8")
@property
def query_string(self):
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self):
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
File = namedtuple("File", ["type", "body", "name"])
def parse_multipart_form(body, boundary):
"""Parse a request body and returns fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters)
"""
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
content_type = "text/plain"
content_charset = "utf-8"
field_name = None
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b"\r\n", line_index)
form_line = form_part[line_index:line_end_index].decode("utf-8")
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(":")
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_header(
form_line[colon_index + 2 :]
)
if form_header_field == "content-disposition":
field_name = form_parameters.get("name")
file_name = form_parameters.get("filename")
# non-ASCII filenames in RFC2231, "filename*" format
if file_name is None and form_parameters.get("filename*"):
encoding, _, value = email.utils.decode_rfc2231(
form_parameters["filename*"]
)
file_name = unquote(value, encoding=encoding)
elif form_header_field == "content-type":
content_type = form_header_value
content_charset = form_parameters.get("charset", "utf-8")
if field_name:
post_data = form_part[line_index:-4]
if file_name is None:
value = post_data.decode(content_charset)
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
else:
form_file = File(
type=content_type, name=file_name, body=post_data
)
if field_name in files:
files[field_name].append(form_file)
else:
files[field_name] = [form_file]
else:
logger.debug(
"Form-data field does not have a 'name' parameter "
"in the Content-Disposition header"
)
return fields, files
| lixxu/sanic | sanic/request.py | Python | mit | 11,420 |
from .tile import Split, Stack, TileStack
class Tile(Split):
class left(Stack):
weight = 3
priority = 0
limit = 1
class right(TileStack):
pass
class Max(Split):
class main(Stack):
tile = False
class InstantMsg(Split):
class left(TileStack): # or maybe not tiled ?
weight = 3
class roster(Stack):
limit = 1
priority = 0 # probably roster created first
class Gimp(Split):
class toolbox(Stack):
limit = 1
size = 184
class main(Stack):
weight = 4
priority = 0
class dock(Stack):
limit = 1
size = 324
| tailhook/tilenol | tilenol/layout/examples.py | Python | mit | 657 |
hmm = [
"https://media3.giphy.com/media/TPl5N4Ci49ZQY/giphy.gif",
"https://media0.giphy.com/media/l14qxlCgJ0zUk/giphy.gif",
"https://media4.giphy.com/media/MsWnkCVSXz73i/giphy.gif",
"https://media1.giphy.com/media/l2JJEIMLgrXPEbDGM/giphy.gif",
"https://media0.giphy.com/media/dgK22exekwOLm/giphy.gif"
] | garr741/mr_meeseeks | rtmbot/plugins/hmm.py | Python | mit | 322 |
from djblets.cache.backend import cache_memoize
class BugTracker(object):
"""An interface to a bug tracker.
BugTracker subclasses are used to enable interaction with different
bug trackers.
"""
def get_bug_info(self, repository, bug_id):
"""Get the information for the specified bug.
This should return a dictionary with 'summary', 'description', and
'status' keys.
This is cached for 60 seconds to reduce the number of queries to the
bug trackers and make things seem fast after the first infobox load,
but is still a short enough time to give relatively fresh data.
"""
return cache_memoize(self.make_bug_cache_key(repository, bug_id),
lambda: self.get_bug_info_uncached(repository,
bug_id),
expiration=60)
def get_bug_info_uncached(self, repository, bug_id):
"""Get the information for the specified bug (implementation).
This should be implemented by subclasses, and should return a
dictionary with 'summary', 'description', and 'status' keys.
If any of those are unsupported by the given bug tracker, the unknown
values should be given as an empty string.
"""
return {
'summary': '',
'description': '',
'status': '',
}
def make_bug_cache_key(self, repository, bug_id):
"""Returns a key to use when caching fetched bug information."""
return 'repository-%s-bug-%s' % (repository.pk, bug_id)
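# Minimal subclass sketch (illustrative; not a tracker bundled with
# Review Board):
#
#   class StaticBugTracker(BugTracker):
#       def get_bug_info_uncached(self, repository, bug_id):
#           return {
#               'summary': 'Bug %s' % bug_id,
#               'description': '',
#               'status': 'open',
#           }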
| reviewboard/reviewboard | reviewboard/hostingsvcs/bugtracker.py | Python | mit | 1,633 |
import sys
from stack import Stack
def parse_expression_into_parts(expression):
"""
Parse expression into list of parts
:rtype : list
:param expression: str # i.e. "2 * 3 + ( 2 - 3 )"
"""
raise NotImplementedError("complete me!")
def evaluate_expression(a, b, op):
raise NotImplementedError("complete me!")
def evaluate_postfix(parts):
raise NotImplementedError("complete me!")
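# Possible implementations (an illustrative sketch, not part of the original
# exercise; a plain list is used as the stack so as not to assume the API of
# the local `stack` module):
#
#   def parse_expression_into_parts(expression):
#       return expression.split()
#
#   def evaluate_expression(a, b, op):
#       return {"+": a + b, "-": a - b, "*": a * b, "/": a / b}[op]
#
#   def evaluate_postfix(parts):
#       operands = []
#       for part in parts:
#           if part in "+-*/":
#               b = operands.pop()
#               a = operands.pop()
#               operands.append(evaluate_expression(a, b, part))
#           else:
#               operands.append(float(part))
#       return operands.pop()
#
# e.g. "9 1 3 + 2 * -" evaluates to 9 - ((1 + 3) * 2) == 1.0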
if __name__ == "__main__":
expr = None
if len(sys.argv) > 1:
expr = sys.argv[1]
parts = parse_expression_into_parts(expr)
print "Evaluating %s == %s" % (expr, evaluate_postfix(parts))
else:
print 'Usage: python postfix.py "<expr>" -- i.e. python postfix.py "9 1 3 + 2 * -"'
print "Spaces are required between every term."
| tylerprete/evaluate-math | postfix.py | Python | mit | 792 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author victor li [email protected]
# @date 2015/10/07
import baseHandler
class MainHandler(baseHandler.RequestHandler):
def get(self):
self.redirect('/posts/last')
| lncwwn/woniu | routers/mainHandler.py | Python | mit | 224 |
from ab_tool.tests.common import (SessionTestCase, TEST_COURSE_ID,
TEST_OTHER_COURSE_ID, NONEXISTENT_TRACK_ID, NONEXISTENT_EXPERIMENT_ID,
APIReturn, LIST_MODULES)
from django.core.urlresolvers import reverse
from ab_tool.models import (Experiment, InterventionPointUrl)
from ab_tool.exceptions import (EXPERIMENT_TRACKS_ALREADY_FINALIZED,
NO_TRACKS_FOR_EXPERIMENT, UNAUTHORIZED_ACCESS,
INTERVENTION_POINTS_ARE_INSTALLED)
import json
from mock import patch
class TestExperimentPages(SessionTestCase):
""" Tests related to Experiment and Experiment pages and methods """
def test_create_experiment_view(self):
""" Tests edit_experiment template renders for url 'create_experiment' """
response = self.client.get(reverse("ab_testing_tool_create_experiment"))
self.assertOkay(response)
self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
def test_create_experiment_view_unauthorized(self):
""" Tests edit_experiment template does not render for url 'create_experiment'
when unauthorized """
self.set_roles([])
response = self.client.get(reverse("ab_testing_tool_create_experiment"), follow=True)
self.assertTemplateNotUsed(response, "ab_tool/create_experiment.html")
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
def test_edit_experiment_view(self):
""" Tests edit_experiment template renders when authenticated """
experiment = self.create_test_experiment()
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
def test_edit_experiment_view_started_experiment(self):
""" Tests edit_experiment template renders when experiment has started """
experiment = self.create_test_experiment()
experiment.tracks_finalized = True
experiment.save()
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
def test_edit_experiment_view_with_tracks_weights(self):
""" Tests edit_experiment template renders properly with track weights """
experiment = self.create_test_experiment()
experiment.assignment_method = Experiment.WEIGHTED_PROBABILITY_RANDOM
track1 = self.create_test_track(name="track1", experiment=experiment)
track2 = self.create_test_track(name="track2", experiment=experiment)
self.create_test_track_weight(experiment=experiment, track=track1)
self.create_test_track_weight(experiment=experiment, track=track2)
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
def test_edit_experiment_view_unauthorized(self):
""" Tests edit_experiment template doesn't render when unauthorized """
self.set_roles([])
experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)),
follow=True)
self.assertTemplateNotUsed(response, "ab_tool/edit_experiment.html")
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
def test_edit_experiment_view_nonexistent(self):
"""Tests edit_experiment when experiment does not exist"""
e_id = NONEXISTENT_EXPERIMENT_ID
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(e_id,)))
self.assertTemplateNotUsed(response, "ab_tool/edit_experiment.html")
self.assertEquals(response.status_code, 404)
def test_edit_experiment_view_wrong_course(self):
""" Tests edit_experiment when attempting to access a experiment from a different course """
experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
self.assertError(response, UNAUTHORIZED_ACCESS)
def test_edit_experiment_view_last_modified_updated(self):
""" Tests edit_experiment to confirm that the last updated timestamp changes """
experiment = self.create_test_experiment()
experiment.name += " (updated)"
response = self.client.post(reverse("ab_testing_tool_submit_edit_experiment",
args=(experiment.id,)),
content_type="application/json",
data=experiment.to_json())
self.assertEquals(response.content, "success")
updated_experiment = Experiment.objects.get(id=experiment.id)
self.assertLess(experiment.updated_on, updated_experiment.updated_on,
response)
def test_submit_create_experiment(self):
""" Tests that create_experiment creates a Experiment object verified by
DB count when uniformRandom is true"""
Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
num_experiments = Experiment.objects.count()
experiment = {
"name": "experiment", "notes": "hi", "uniformRandom": True,
"csvUpload": False,
"tracks": [{"id": None, "weighting": None, "name": "A"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_create_experiment"), follow=True,
content_type="application/json", data=json.dumps(experiment)
)
self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)
def test_submit_create_experiment_csv_upload(self):
""" Tests that create_experiment creates a Experiment object verified by
DB count when csvUpload is True and no track weights are specified"""
Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
num_experiments = Experiment.objects.count()
experiment = {
"name": "experiment", "notes": "hi", "uniformRandom": False,
"csvUpload": True,
"tracks": [{"id": None, "name": "A"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_create_experiment"), follow=True,
content_type="application/json", data=json.dumps(experiment)
)
self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)
def test_submit_create_experiment_with_weights_as_assignment_method(self):
""" Tests that create_experiment creates a Experiment object verified by
DB count when uniformRandom is false and the tracks have weightings """
Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
num_experiments = Experiment.objects.count()
experiment = {
"name": "experiment", "notes": "hi", "uniformRandom": False,
"csvUpload": False,
"tracks": [{"id": None, "weighting": 100, "name": "A"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_create_experiment"), follow=True,
content_type="application/json", data=json.dumps(experiment)
)
self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)
def test_submit_create_experiment_unauthorized(self):
"""Tests that create_experiment creates a Experiment object verified by DB count"""
self.set_roles([])
Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
num_experiments = Experiment.objects.count()
experiment = {"name": "experiment", "notes": "hi"}
response = self.client.post(
reverse("ab_testing_tool_submit_create_experiment"), follow=True,
content_type="application/json", data=json.dumps(experiment)
)
self.assertEquals(num_experiments, Experiment.objects.count())
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
def test_submit_edit_experiment(self):
""" Tests that submit_edit_experiment does not change DB count but does change Experiment
attribute"""
experiment = self.create_test_experiment(name="old_name")
experiment_id = experiment.id
num_experiments = Experiment.objects.count()
experiment = {
"name": "new_name", "notes": "hi", "uniformRandom": True,
"csvUpload": False,
"tracks": [{"id": None, "weighting": None, "name": "A"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
follow=True, content_type="application/json", data=json.dumps(experiment)
)
self.assertOkay(response)
self.assertEquals(num_experiments, Experiment.objects.count())
experiment = Experiment.objects.get(id=experiment_id)
self.assertEquals(experiment.name, "new_name")
def test_submit_edit_experiment_changes_assignment_method_to_weighted(self):
""" Tests that submit_edit_experiment changes an Experiment's assignment
method from uniform (default) to weighted"""
experiment = self.create_test_experiment(name="old_name")
experiment_id = experiment.id
num_experiments = Experiment.objects.count()
no_track_weights = experiment.track_probabilites.count()
experiment = {
"name": "new_name", "notes": "hi", "uniformRandom": False,
"csvUpload": False,
"tracks": [{"id": None, "weighting": 20, "name": "A"},
{"id": None, "weighting": 80, "name": "B"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
follow=True, content_type="application/json", data=json.dumps(experiment)
)
self.assertOkay(response)
self.assertEquals(num_experiments, Experiment.objects.count())
experiment = Experiment.objects.get(id=experiment_id)
self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
self.assertEquals(experiment.track_probabilites.count(), no_track_weights + 2)
def test_submit_edit_experiment_changes_assignment_method_to_uniform(self):
""" Tests that submit_edit_experiment changes an Experiment's assignment
method from weighted uniform """
experiment = self.create_test_experiment(
name="old_name", assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
experiment_id = experiment.id
num_experiments = Experiment.objects.count()
no_tracks = experiment.tracks.count()
experiment = {
"name": "new_name", "notes": "hi", "uniformRandom": True,
"csvUpload": False,
"tracks": [{"id": None, "weighting": None, "name": "A"},
{"id": None, "weighting": None, "name": "B"},
{"id": None, "weighting": None, "name": "C"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
follow=True, content_type="application/json", data=json.dumps(experiment)
)
self.assertOkay(response)
self.assertEquals(num_experiments, Experiment.objects.count())
experiment = Experiment.objects.get(id=experiment_id)
self.assertEquals(experiment.assignment_method, Experiment.UNIFORM_RANDOM)
self.assertEquals(experiment.tracks.count(), no_tracks + 3)
def test_submit_edit_experiment_unauthorized(self):
""" Tests submit_edit_experiment when unauthorized"""
self.set_roles([])
experiment = self.create_test_experiment(name="old_name")
experiment_id = experiment.id
experiment = {"name": "new_name", "notes": ""}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
content_type="application/json", data=json.dumps(experiment), follow=True
)
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
def test_submit_edit_experiment_nonexistent(self):
""" Tests that submit_edit_experiment method raises error for non-existent Experiment """
experiment_id = NONEXISTENT_EXPERIMENT_ID
experiment = {"name": "new_name", "notes": ""}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
content_type="application/json", data=json.dumps(experiment)
)
self.assertEquals(response.status_code, 404)
def test_submit_edit_experiment_wrong_course(self):
""" Tests that submit_edit_experiment method raises error for existent Experiment but
for wrong course"""
experiment = self.create_test_experiment(name="old_name",
course_id=TEST_OTHER_COURSE_ID)
data = {"name": "new_name", "notes": ""}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment.id,)),
content_type="application/json", data=json.dumps(data)
)
self.assertError(response, UNAUTHORIZED_ACCESS)
def test_submit_edit_started_experiment_changes_name_and_notes(self):
""" Tests that submit_edit_experiment changes an Experiment's
name and notes and track names only if the experiment has already been started """
experiment = self.create_test_experiment(name="old_name", notes="old_notes",
tracks_finalized=True)
experiment_id = experiment.id
num_experiments = Experiment.objects.count()
old_track = self.create_test_track(experiment=experiment, name="old_name_track")
experiment_json = {
"name": "new_name", "notes": "new_notes", "tracks": [{"id": old_track.id,
"name": "new_track_name"}],
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
follow=True, content_type="application/json", data=json.dumps(experiment_json)
)
self.assertOkay(response)
self.assertEquals(num_experiments, Experiment.objects.count())
experiment = Experiment.objects.get(id=experiment_id)
self.assertEquals(experiment.name, "new_name")
self.assertEquals(experiment.notes, "new_notes")
self.assertEquals(experiment.tracks.all()[0].name, "new_track_name")
def test_submit_edit_started_experiment_does_not_change_tracks(self):
""" Tests that submit_edit_experiment doesn't change tracks for
an experiment that has already been started """
experiment = self.create_test_experiment(name="old_name", tracks_finalized=True,
assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
experiment_id = experiment.id
num_experiments = Experiment.objects.count()
no_tracks = experiment.tracks.count()
experiment = {
"name": "new_name", "notes": "hi", "uniformRandom": True,
"csvUpload": False,
"tracks": [{"id": None, "weighting": None, "name": "A"},
{"id": None, "weighting": None, "name": "B"},
{"id": None, "weighting": None, "name": "C"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
follow=True, content_type="application/json", data=json.dumps(experiment)
)
self.assertOkay(response)
self.assertEquals(num_experiments, Experiment.objects.count())
experiment = Experiment.objects.get(id=experiment_id)
self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
self.assertEquals(experiment.tracks.count(), no_tracks)
    def test_submit_edit_unstarted_experiment_changes_existing_tracks(self):
""" Tests that submit_edit_experiment does change track objects for
an experiment that has not yet been started """
experiment = self.create_test_experiment(name="old_name", tracks_finalized=False,
assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
track1 = self.create_test_track(experiment=experiment, name="A")
track2 = self.create_test_track(experiment=experiment, name="B")
self.create_test_track_weight(experiment=experiment, track=track1)
self.create_test_track_weight(experiment=experiment, track=track2)
track_count = experiment.tracks.count()
experiment_json = {
"name": "new_name", "notes": "hi", "uniformRandom": False,
"csvUpload": False,
"tracks": [{"id": track1.id, "weighting": 30, "name": "C"},
{"id": track2.id, "weighting": 70, "name": "D"}]
}
response = self.client.post(
reverse("ab_testing_tool_submit_edit_experiment", args=(experiment.id,)),
follow=True, content_type="application/json", data=json.dumps(experiment_json)
)
self.assertOkay(response)
experiment = Experiment.objects.get(id=experiment.id)
self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
self.assertEquals(experiment.tracks.count(), track_count)
track1 = experiment.tracks.get(id=track1.id)
track2 = experiment.tracks.get(id=track2.id)
self.assertEquals(track1.name, "C") #Checks name has changed
self.assertEquals(track2.name, "D")
self.assertEquals(track1.weight.weighting, 30) #Checks weighting has changed
self.assertEquals(track2.weight.weighting, 70)
def test_delete_experiment(self):
""" Tests that delete_experiment method properly deletes a experiment when authorized"""
first_num_experiments = Experiment.objects.count()
experiment = self.create_test_experiment()
self.assertEqual(first_num_experiments + 1, Experiment.objects.count())
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_experiments = Experiment.objects.count()
self.assertOkay(response)
self.assertEqual(first_num_experiments, second_num_experiments)
def test_delete_experiment_already_finalized(self):
""" Tests that delete experiment doesn't work when experiments are finalized """
experiment = self.create_test_experiment()
experiment.update(tracks_finalized=True)
first_num_experiments = Experiment.objects.count()
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_experiments = Experiment.objects.count()
self.assertError(response, EXPERIMENT_TRACKS_ALREADY_FINALIZED)
self.assertEqual(first_num_experiments, second_num_experiments)
@patch(LIST_MODULES, return_value=APIReturn([{"id": 0}]))
def test_delete_experiment_has_installed_intervention_point(self, _mock1):
""" Tests that delete experiment doesn't work when there is an associated
intervention point is installed """
experiment = self.create_test_experiment()
first_num_experiments = Experiment.objects.count()
ret_val = [True]
with patch("ab_tool.canvas.CanvasModules.experiment_has_installed_intervention",
return_value=ret_val):
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_experiments = Experiment.objects.count()
self.assertError(response, INTERVENTION_POINTS_ARE_INSTALLED)
self.assertEqual(first_num_experiments, second_num_experiments)
def test_delete_experiment_unauthorized(self):
""" Tests that delete_experiment method raises error when unauthorized """
self.set_roles([])
experiment = self.create_test_experiment()
first_num_experiments = Experiment.objects.count()
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_experiments = Experiment.objects.count()
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
self.assertEqual(first_num_experiments, second_num_experiments)
def test_delete_experiment_nonexistent(self):
""" Tests that delete_experiment method raises successfully redirects
despite non-existent Experiment. This is by design, as the Http404
is caught since multiple users may be editing the A/B dashboard on
in the same course """
self.create_test_experiment()
t_id = NONEXISTENT_EXPERIMENT_ID
first_num_experiments = Experiment.objects.count()
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(t_id,)), follow=True)
second_num_experiments = Experiment.objects.count()
self.assertEqual(first_num_experiments, second_num_experiments)
self.assertOkay(response)
def test_delete_experiment_wrong_course(self):
""" Tests that delete_experiment method raises error for existent Experiment but for
wrong course """
experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
first_num_experiments = Experiment.objects.count()
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_experiments = Experiment.objects.count()
self.assertEqual(first_num_experiments, second_num_experiments)
self.assertError(response, UNAUTHORIZED_ACCESS)
def test_delete_experiment_deletes_intervention_point_urls(self):
""" Tests that intervention_point_urls of a experiment are deleted when the experiment is """
experiment = self.create_test_experiment()
track1 = self.create_test_track(name="track1", experiment=experiment)
track2 = self.create_test_track(name="track2", experiment=experiment)
intervention_point = self.create_test_intervention_point()
InterventionPointUrl.objects.create(intervention_point=intervention_point,
track=track1, url="example.com")
InterventionPointUrl.objects.create(intervention_point=intervention_point,
track=track2, url="example.com")
first_num_intervention_point_urls = InterventionPointUrl.objects.count()
response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
follow=True)
second_num_intervention_point_urls = InterventionPointUrl.objects.count()
self.assertOkay(response)
self.assertEqual(first_num_intervention_point_urls - 2, second_num_intervention_point_urls)
def test_finalize_tracks(self):
""" Tests that the finalize tracks page sets the appropriate course """
experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
self.assertFalse(experiment.tracks_finalized)
self.create_test_track()
response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)),
follow=True)
self.assertOkay(response)
experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
self.assertTrue(experiment.tracks_finalized)
def test_finalize_tracks_missing_urls(self):
""" Tests that finalize fails if there are missing urls """
experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
self.assertFalse(experiment.tracks_finalized)
track1 = self.create_test_track(name="track1", experiment=experiment)
self.create_test_track(name="track2", experiment=experiment)
intervention_point = self.create_test_intervention_point()
InterventionPointUrl.objects.create(intervention_point=intervention_point,
track=track1, url="example.com")
response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)), follow=True)
self.assertOkay(response)
experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
self.assertFalse(experiment.tracks_finalized)
def test_finalize_tracks_no_tracks(self):
""" Tests that finalize fails if there are no tracks for an experiment """
experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)),
follow=True)
self.assertError(response, NO_TRACKS_FOR_EXPERIMENT)
def test_finalize_tracks_missing_track_weights(self):
""" Tests that finalize fails if there are no track weights for an weighted
probability experiment """
experiment = self.create_test_experiment(assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
self.create_test_track(name="track1", experiment=experiment)
response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)), follow=True)
self.assertOkay(response)
self.assertFalse(experiment.tracks_finalized)
def test_copy_experiment(self):
""" Tests that copy_experiment creates a new experiment """
experiment = self.create_test_experiment()
num_experiments = Experiment.objects.count()
url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
response = self.client.post(url, follow=True)
self.assertOkay(response)
self.assertEqual(Experiment.objects.count(), num_experiments + 1)
def test_copy_experiment_unauthorized(self):
""" Tests that copy_experiment fails when unauthorized """
self.set_roles([])
experiment = self.create_test_experiment()
url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
response = self.client.post(url, follow=True)
self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    def test_copy_experiment_invalid_id(self):
""" Tests that copy_experiment fails with bad experiment_id """
url = reverse("ab_testing_tool_copy_experiment", args=(12345,))
response = self.client.post(url, follow=True)
self.assertEquals(response.status_code, 404)
def test_copy_experiment_wrong_course(self):
""" Tests that copy_experiment fails if experiment is different coruse """
experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
response = self.client.post(url, follow=True)
self.assertError(response, UNAUTHORIZED_ACCESS)
def test_delete_track(self):
""" Tests that delete_track method properly deletes a track of an experiment when authorized"""
experiment = self.create_test_experiment()
track = self.create_test_track(experiment=experiment)
self.assertEqual(experiment.tracks.count(), 1)
response = self.client.post(reverse("ab_testing_tool_delete_track", args=(track.id,)),
follow=True)
self.assertEqual(experiment.tracks.count(), 0)
self.assertOkay(response)
def test_delete_nonexistent_track(self):
""" Tests that delete_track method succeeds, by design, when deleting a nonexistent track"""
experiment = self.create_test_experiment()
self.assertEqual(experiment.tracks.count(), 0)
response = self.client.post(reverse("ab_testing_tool_delete_track", args=(NONEXISTENT_TRACK_ID,)),
follow=True)
self.assertEqual(experiment.tracks.count(), 0)
self.assertOkay(response)
| penzance/ab-testing-tool | ab_tool/tests/test_experiment_pages.py | Python | mit | 28,898 |
#-*- coding: utf-8 -*-
from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView
from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig
import os
from PIL import Image
class SettingView(AuthenticateView, BaseView):
def saveProfileImage(self, filestorage):
buffer = filestorage.stream
buffer.seek(0)
image = Image.open(buffer)
image = ImageUtil.crop_image(image, 64)
current_app.logger.info(image)
dirpath = getDirectoryPath(current_app, '_settings')
filepath = os.path.join(dirpath, "profile.png")
image.save(filepath, optimize=True)
@expose('/', methods=('GET','POST'))
def index(self):
form = SettingForm()
if form.validate_on_submit():
if form.profile_img.data:
file = form.profile_img.data
self.saveProfileImage(file)
data = {}
data['site_title'] = form.title.data
data['site_subtitle'] = form.subtitle.data
data['site_author'] = form.author.data
data['site_author_profile'] = form.author_profile.data
data['enable_link_github'] = form.enable_link_github.data
data['enable_profile_img'] = form.enable_profile_img.data
data["ogp_app_id"] = form.ogp_app_id.data
data["ga_tracking_id"] = form.ga_tracking_id.data
data["enable_twittercard"] = form.enable_twittercard.data
data["twitter_username"] = form.twitter_username.data
data['display_poweredby'] = form.display_poweredby.data
if saveSiteConfig(current_app, data):
flash('Successfully saved.')
else:
flash_errors('Oops. Save error.')
else:
flash_errors(form)
data = loadSiteConfig(current_app)
form.title.data = data['site_title']
form.subtitle.data = data['site_subtitle']
form.author.data = data['site_author']
form.author_profile.data = data['site_author_profile']
form.enable_link_github.data = data['enable_link_github']
form.enable_profile_img.data = data['enable_profile_img']
form.ogp_app_id.data = data["ogp_app_id"]
form.ga_tracking_id.data = data["ga_tracking_id"]
form.enable_twittercard.data = data["enable_twittercard"]
form.twitter_username.data = data["twitter_username"]
form.display_poweredby.data = data['display_poweredby']
return self.render('admin/setting.html', form=form)
| moremorefor/Logpot | logpot/admin/setting.py | Python | mit | 2,802 |
# TODO: when raising an exception, pass a lambda function that supplies the module/path/name
ERROR = {'default': "Unknown engine error ({0})",
400: "Bad request sent to search API ({0})",
401: "Incorrect API Key ({0})",
403: "Correct API but request refused ({0})",
404: "Bad request sent to search API ({0})"}
class SearchException(Exception):
"""
Abstract class representing an ifind search exception.
"""
def __init__(self, module, message):
"""
SearchException constructor.
Args:
module (str): name of module/class that's raising exception
message (str): exception message to be displayed
Usage:
raise SearchException("Test", "this is an error")
"""
message = "{0} - {1}".format(module, message)
Exception.__init__(self, message)
class EngineConnectionException(SearchException):
"""
Thrown when an Engine connectivity error occurs.
Returns specific response message if status code specified.
"""
def __init__(self, engine, message, code=None):
"""
EngineException constructor.
Args:
engine (str): name of engine that's raising exception
message (str): exception message to be displayed (ignored usually here)
Kwargs:
code (int): response status code of issued request
Usage:
raise EngineException("Bing", "", code=200)
"""
self.message = message
self.code = code
if code:
self.message = ERROR.get(code, ERROR['default']).format(self.code)
SearchException.__init__(self, engine, self.message)
class EngineLoadException(SearchException):
"""
Thrown when an Engine can't be dynamically loaded.
"""
pass
class EngineAPIKeyException(SearchException):
"""
Thrown when an Engine's API key hasn't been provided.
"""
pass
class QueryParamException(SearchException):
"""
Thrown when a query parameters incompatible or missing.
"""
pass
class CacheConnectionException(SearchException):
"""
Thrown when cache connectivity error occurs.
"""
pass
class InvalidQueryException(SearchException):
"""
Thrown when an invalid query is passed to engine's search method.
"""
pass
class RateLimitException(SearchException):
"""
Thrown when an engine's request rate limit has been exceeded.
"""
pass | leifos/ifind | ifind/search/exceptions.py | Python | mit | 2,524 |
#!/usr/bin/python
import os
import re
from lxml import etree as et
import pcbmode.config as config
from . import messages as msg
# pcbmode modules
from . import utils
from .point import Point
def makeExcellon(manufacturer='default'):
"""
"""
ns = {'pcbmode':config.cfg['ns']['pcbmode'],
'svg':config.cfg['ns']['svg']}
# Open the board's SVG
svg_in = utils.openBoardSVG()
drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']",
namespaces=ns)
excellon = Excellon(drills_layer)
# Save to file
base_dir = os.path.join(config.cfg['base-dir'],
config.cfg['locations']['build'],
'production')
base_name = "%s_rev_%s" % (config.brd['config']['name'],
config.brd['config']['rev'])
filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills']
add = '_%s.%s' % ('drills',
filename_info['plated'].get('ext') or 'txt')
filename = os.path.join(base_dir, base_name + add)
with open(filename, "wb") as f:
for line in excellon.getExcellon():
f.write(line)
class Excellon():
"""
"""
def __init__(self, svg):
"""
"""
self._svg = svg
self._ns = {'pcbmode':config.cfg['ns']['pcbmode'],
'svg':config.cfg['ns']['svg']}
# Get all drill paths except for the ones used in the
# drill-index
drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path",
namespaces=self._ns)
drills_dict = {}
for drill_path in drill_paths:
diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter')
location = self._getLocation(drill_path)
if diameter not in drills_dict:
drills_dict[diameter] = {}
drills_dict[diameter]['locations'] = []
drills_dict[diameter]['locations'].append(location)
self._preamble = self._createPreamble()
self._content = self._createContent(drills_dict)
self._postamble = self._createPostamble()
def getExcellon(self):
return (self._preamble+
self._content+
self._postamble)
def _createContent(self, drills):
"""
"""
ex = []
for i, diameter in enumerate(drills):
            # This is probably not necessary, but I'm not 100% certain
            # that the iteration order of a dict is guaranteed. If it
            # isn't, the result could be quite devastating: drill
            # diameters could end up wrong!
# Drill index must be greater than 0
drills[diameter]['index'] = i+1
ex.append("T%dC%s\n" % (i+1, diameter))
ex.append('M95\n') # End of a part program header
for diameter in drills:
ex.append("T%s\n" % drills[diameter]['index'])
for coord in drills[diameter]['locations']:
ex.append(self._getPoint(coord))
return ex
def _createPreamble(self):
"""
"""
ex = []
ex.append('M48\n') # Beginning of a part program header
ex.append('METRIC,TZ\n') # Metric, trailing zeros
ex.append('G90\n') # Absolute mode
ex.append('M71\n') # Metric measuring mode
return ex
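    # For reference, the header assembled above renders as:
    #
    #   M48          (beginning of a part program header)
    #   METRIC,TZ    (metric units, trailing zeros)
    #   G90          (absolute mode)
    #   M71          (metric measuring mode)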
def _createPostamble(self):
"""
"""
ex = []
ex.append('M30\n') # End of Program, rewind
return ex
def _getLocation(self, path):
"""
Returns the location of a path, factoring in all the transforms of
its ancestors, and its own transform
"""
location = Point()
# We need to get the transforms of all ancestors that have
# one in order to get the location correctly
ancestors = path.xpath("ancestor::*[@transform]")
for ancestor in ancestors:
transform = ancestor.get('transform')
transform_data = utils.parseTransform(transform)
# Add them up
location += transform_data['location']
# Add the transform of the path itself
transform = path.get('transform')
if transform != None:
transform_data = utils.parseTransform(transform)
location += transform_data['location']
return location
def _getPoint(self, point):
"""
Converts a Point type into an Excellon coordinate
"""
return "X%.6fY%.6f\n" % (point.x, -point.y)
| ddm/pcbmode | pcbmode/utils/excellon.py | Python | mit | 4,661 |
"""Run tests for the kmeans portion of the kmeans module"""
import kmeans.kmeans.kmeans as kmeans
import numpy as np
import random
def test_1dim_distance():
"""See if this contraption works in 1 dimension"""
num1 = random.random()
num2 = random.random()
assert kmeans.ndim_euclidean_distance(num1, num2) == abs(num1-num2)
def test_ndim_distance():
"""Test to see if changing val by 1 does what it ought to do
convert to float to integer because floating arithmetic makes testing
analytic functions a mess"""
rand = random.random
point1 = [rand(), rand(), rand(), rand(), rand(), rand()]
point2 = [point1[0]+1] + point1[1:] # just shift x to the right by 1
assert int(round(kmeans.ndim_euclidean_distance(point1, point2))) == 1
def test_maxiters():
"""ensure the iteration ceiling works"""
# assert kmeans.should_iter([], [], iterations=29) == True
assert kmeans.should_iter([], [], iterations=30) == False
assert kmeans.should_iter([], [], iterations=31) == False
def test_random_centroid_dimensions():
"""ensure the correct number of dimensions"""
dimensions = random.randrange(1, 100)
k = random.randrange(1, 100)
centroids = kmeans.random_centroids(k, dimensions)
for centroid in centroids:
assert len(centroid) == dimensions
def test_iterated_centroid():
"""ensure that the average across each dimension is returned"""
new_centroid = kmeans.iterated_centroid([[1, 1, 1], [2, 2, 2]],\
[[100, 200, 300]], [(0, 0), (1, 0)])
np.testing.assert_allclose(new_centroid, np.array([[1.5, 1.5, 1.5]]),\
rtol=1e-5)
| moradology/kmeans | tests/testkmeans.py | Python | mit | 1,642 |
"""
Django settings for ross project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jtn=n8&nq9jgir8_z1ck40^c1s22d%=)z5qsm*q(bku*_=^sg&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ross.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ross.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| rossplt/ross-django-utils | ross/settings.py | Python | mit | 3,090 |
import re
import hashlib
FNAME_MATCH = re.compile(r'/([^/]+)$') # From the last slash to the end of the string
PREFIX = re.compile(r'([^:]+://)(/)?(.+)') # Check for a prefix like data://
def getParentAndBase(path):
match = PREFIX.match(path)
if match is None:
if path.endswith('/'):
stripped_path = path[:-1]
else:
stripped_path = path
base = FNAME_MATCH.search(stripped_path)
if base is None:
raise ValueError('Invalid path')
parent = FNAME_MATCH.sub('', stripped_path)
return parent, base.group(1)
else:
prefix, leading_slash, uri = match.groups()
        parts = uri.split('/')
        parent_path = '/'.join(parts[:-1])
        if leading_slash is not None:
            parent_path = '{prefix}/{path}'.format(prefix=prefix, path=parent_path)
        else:
            parent_path = '{prefix}{path}'.format(prefix=prefix, path=parent_path)
return parent_path, parts[-1]
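# A couple of illustrative inputs (hypothetical paths):
#   getParentAndBase('/foo/bar.txt')        -> ('/foo', 'bar.txt')
#   getParentAndBase('data://foo/bar.txt')  -> ('data://foo', 'bar.txt')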
def pathJoin(parent, base):
if parent.endswith('/'):
return parent + base
return parent + '/' + base
def md5_for_file(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return str(hash_md5.hexdigest())
def md5_for_str(content):
hash_md5 = hashlib.md5()
hash_md5.update(content.encode())
return str(hash_md5.hexdigest())
| algorithmiaio/algorithmia-python | Algorithmia/util.py | Python | mit | 1,473 |
"""
Train a supervised classifier on the What's Cooking recipe data.
Objective: predict a recipe's cuisine, one of 20 categorical values.
"""
import time
from features_bow import *
from features_word2vec import *
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import cross_val_score
""" main entry method """
def main(use_idf=False, random_state=None, std=False, n_jobs=-1, verbose=2):
wc_idf_map = None
if use_idf:
# ingredients inverse document frequencies
wc_components = build_tfidf_wc(verbose=(verbose > 0))
wc_idf = wc_components['model'].idf_
wc_idf_words = wc_components['model'].get_feature_names()
wc_idf_map = dict(zip(wc_idf_words, wc_idf))
# word2vec recipe feature vectors
wc_components = build_word2vec_wc(feature_vec_size=120, avg=True, idf=wc_idf_map, verbose=(verbose > 0))
y_train = wc_components['train']['df']['cuisine_code'].as_matrix()
X_train = wc_components['train']['features_matrix']
# standardize features aka mean ~ 0, std ~ 1
if std:
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
# random forest supervised classifier
time_0 = time.time()
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
n_jobs=n_jobs, random_state=random_state, verbose=verbose)
# perform cross validation
cv_n_fold = 8
print 'cross validating %s ways...' % cv_n_fold
scores_cv = cross_val_score(clf, X_train, y_train, cv=cv_n_fold, n_jobs=-1)
print 'accuracy: %0.5f (+/- %0.5f)' % (scores_cv.mean(), scores_cv.std() * 2)
time_1 = time.time()
elapsed_time = time_1 - time_0
print 'cross validation took %.3f seconds' % elapsed_time
if __name__ == '__main__':
main()
| eifuentes/kaggle_whats_cooking | train_word2vec_rf.py | Python | mit | 1,909 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Configuration options for Invenio-Search.
The documentation for the configuration is in docs/configuration.rst.
"""
#
# ELASTIC configuration
#
SEARCH_CLIENT_CONFIG = None
"""Dictionary of options for the Elasticsearch client.
The value of this variable is passed to :py:class:`elasticsearch.Elasticsearch`
as keyword arguments and is used to configure the client. See the available
keyword arguments in the two following classes:
- :py:class:`elasticsearch.Elasticsearch`
- :py:class:`elasticsearch.Transport`
If you specify the key ``hosts`` in this dictionary, the configuration variable
:py:class:`~invenio_search.config.SEARCH_ELASTIC_HOSTS` will have no effect.
"""
SEARCH_ELASTIC_HOSTS = None # default localhost
"""Elasticsearch hosts.
By default, Invenio connects to ``localhost:9200``.
The value of this variable is a list of dictionaries, where each dictionary
represents a host. The available keys in each dictionary is determined by the
connection class:
- :py:class:`elasticsearch.connection.Urllib3HttpConnection` (default)
- :py:class:`elasticsearch.connection.RequestsHttpConnection`
You can change the connection class via the
:py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG`. If you specified the
``hosts`` key in :py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG` then
this configuration variable will have no effect.
"""
SEARCH_MAPPINGS = None # loads all mappings and creates aliases for them
"""List of aliases for which, their search mappings should be created.
- If `None` all aliases (and their search mappings) defined through the
``invenio_search.mappings`` entry point in setup.py will be created.
- Provide an empty list ``[]`` if no aliases (or their search mappings)
should be created.
For example if you don't want to create aliases
and their mappings for `authors`:
.. code-block:: python
# in your `setup.py` you would specify:
entry_points={
'invenio_search.mappings': [
'records = invenio_foo_bar.mappings',
'authors = invenio_foo_bar.mappings',
],
}
# and in your config.py
SEARCH_MAPPINGS = ['records']
"""
SEARCH_RESULTS_MIN_SCORE = None
"""If set, the `min_score` parameter is added to each search request body.
The `min_score` parameter excludes results which have a `_score` less than
the minimum specified in `min_score`.
Note that the `max_score` varies depending on the number of results for a given
search query and is not an absolute value. Therefore, setting `min_score` too
high can lead to 0 results because it can be higher than any result's `_score`.
Please refer to `Elasticsearch min_score documentation
<https://www.elastic.co/guide/en/elasticsearch/reference/current/
search-request-min-score.html>`_ for more information.
"""
SEARCH_INDEX_PREFIX = ''
"""Any index, alias and templates will be prefixed with this string.
Useful for hosting multiple instances of the app on the same Elasticsearch
cluster: for example, set it to `dev-` on one app and `prod-` on the other,
and each will create non-colliding indices prefixed with the corresponding
string.
Usage example:
.. code-block:: python
# in your config.py
SEARCH_INDEX_PREFIX = 'prod-'
For templates, ensure that the prefix `__SEARCH_INDEX_PREFIX__` is added to
your index names. This pattern will be replaced by the prefix config value.
Usage example in your template.json:
.. code-block:: json
{
"index_patterns": ["__SEARCH_INDEX_PREFIX__myindex-name-*"]
}
"""
| inveniosoftware/invenio-search | invenio_search/config.py | Python | mit | 3,755 |
from __future__ import unicode_literals
from django.apps import AppConfig
class RfhistoryConfig(AppConfig):
name = 'RFHistory'
| Omenia/RFHistory | ServerApp/apps.py | Python | mit | 134 |
import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
class TestDifferentOutputs(unittest.TestCase):
"""Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""
def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices)
def test_lat_lon_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lat_lon():
assert_almost_equal(vertex[0], self.latitude, places=2)
assert_almost_equal(vertex[1], self.longitude, places=2)
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2)
def test_vertices_equals_lat_lon(self):
"""Asserts that the "vertices" property is identical to the return
value of to_lat_lon()."""
assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat())
if __name__ == '__main__':
unittest.main() | adamatan/polycircles | polycircles/test/test_different_outputs.py | Python | mit | 1,881 |
# coding: utf-8
""" General utilities. """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import collections
import sys
import logging
import multiprocessing
# Third-party
import numpy as np
__all__ = ['get_pool']
# Create logger
logger = logging.getLogger(__name__)
class SerialPool(object):
def close(self):
return
def map(self, *args, **kwargs):
return map(*args, **kwargs)
def get_pool(mpi=False, threads=None):
""" Get a pool object to pass to emcee for parallel processing.
    If mpi is False and threads is None, a serial pool is returned.
Parameters
----------
mpi : bool
Use MPI or not. If specified, ignores the threads kwarg.
threads : int (optional)
If mpi is False and threads is specified, use a Python
multiprocessing pool with the specified number of threads.
"""
if mpi:
from emcee.utils import MPIPool
# Initialize the MPI pool
pool = MPIPool()
# Make sure the thread we're running on is the master
if not pool.is_master():
pool.wait()
sys.exit(0)
logger.debug("Running with MPI...")
    elif threads is not None and threads > 1:
logger.debug("Running with multiprocessing on {} cores..."
.format(threads))
pool = multiprocessing.Pool(threads)
else:
logger.debug("Running serial...")
pool = SerialPool()
return pool
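# Typical usage (a sketch; ``worker`` and ``tasks`` are hypothetical):
#
#     pool = get_pool(threads=4)
#     results = pool.map(worker, tasks)
#     pool.close()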
def gram_schmidt(y):
""" Modified Gram-Schmidt orthonormalization of the matrix y(n,n) """
n = y.shape[0]
if y.shape[1] != n:
raise ValueError("Invalid shape: {}".format(y.shape))
mo = np.zeros(n)
# Main loop
for i in range(n):
# Remove component in direction i
for j in range(i):
esc = np.sum(y[j]*y[i])
y[i] -= y[j]*esc
# Normalization
mo[i] = np.linalg.norm(y[i])
y[i] /= mo[i]
return mo
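# Illustrative sanity check: after orthonormalization the rows of y form an
# orthonormal basis, so y.dot(y.T) is (numerically) the identity matrix:
#
#     y = np.random.rand(3, 3)
#     norms = gram_schmidt(y)  # modifies y in place; returns pre-normalization row norms
#     assert np.allclose(y.dot(y.T), np.eye(3))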
class use_backend(object):
def __init__(self, backend):
import matplotlib.pyplot as plt
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import backend2gui
self.shell = InteractiveShell.instance()
self.old_backend = backend2gui[str(plt.get_backend())]
self.new_backend = backend
def __enter__(self):
gui, backend = self.shell.enable_matplotlib(self.new_backend)
def __exit__(self, type, value, tb):
gui, backend = self.shell.enable_matplotlib(self.old_backend)
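# Sketch of intended usage (requires an active IPython shell; 'qt' is just an
# example backend name):
#
#     with use_backend('qt'):
#         ...  # plotting calls in this block use the temporary backend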
def inherit_docs(cls):
for name, func in vars(cls).items():
if not func.__doc__:
for parent in cls.__bases__:
try:
parfunc = getattr(parent, name)
except AttributeError: # parent doesn't have function
break
if parfunc and getattr(parfunc, '__doc__', None):
func.__doc__ = parfunc.__doc__
break
return cls
class ImmutableDict(collections.Mapping):
def __init__(self, somedict):
self._dict = dict(somedict) # make a copy
self._hash = None
def __getitem__(self, key):
return self._dict[key]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def __hash__(self):
if self._hash is None:
self._hash = hash(frozenset(self._dict.items()))
return self._hash
def __eq__(self, other):
return self._dict == other._dict
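# Because it is hashable, an ImmutableDict can be used where a plain dict
# cannot -- for example as a dictionary key or a set member (illustrative):
#
#     d = ImmutableDict({'a': 1})
#     cache = {d: 'cached value'}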
| abonaca/gary | gary/util.py | Python | mit | 3,607 |