code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M)
---|---|---|---|---|---|
from notifications_utils.clients.antivirus.antivirus_client import (
AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient
antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()
| alphagov/notifications-admin | app/extensions.py | Python | mit | 340 |
#!/usr/bin/env python
import pygame
pygame.display.init()
pygame.font.init()
modes_list = pygame.display.list_modes()
#screen = pygame.display.set_mode(modes_list[0], pygame.FULLSCREEN) # the highest resolution with fullscreen
screen = pygame.display.set_mode(modes_list[-1]) # the lowest resolution
background_color = (255, 255, 255)
screen.fill(background_color)
font = pygame.font.Font(pygame.font.get_default_font(), 22)
text_surface = font.render("Hello world!", True, (0,0,0))
screen.blit(text_surface, (0,0)) # paste the text at the top left corner of the window
pygame.display.flip() # display the image
while True: # main loop (event loop)
event = pygame.event.wait()
if(event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
break
| jeremiedecock/snippets | python/pygame/hello_text.py | Python | mit | 882 |
import asyncio
import asyncio.subprocess
import datetime
import logging
from collections import OrderedDict, defaultdict
from typing import Any, Awaitable, Dict, List, Optional, Union # noqa
from urllib.parse import urlparse
from aiohttp import web
import yacron.version
from yacron.config import (
JobConfig,
parse_config,
ConfigError,
parse_config_string,
WebConfig,
)
from yacron.job import RunningJob, JobRetryState
from crontab import CronTab # noqa
logger = logging.getLogger("yacron")
WAKEUP_INTERVAL = datetime.timedelta(minutes=1)
def naturaltime(seconds: float, future=False) -> str:
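    # Rough human-readable rendering of a future time delta: seconds, then minutes, hours, days.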
assert future
if seconds < 120:
return "in {} second{}".format(
int(seconds), "s" if seconds >= 2 else ""
)
minutes = seconds / 60
if minutes < 120:
return "in {} minute{}".format(
int(minutes), "s" if minutes >= 2 else ""
)
hours = minutes / 60
if hours < 48:
return "in {} hour{}".format(int(hours), "s" if hours >= 2 else "")
days = hours / 24
return "in {} day{}".format(int(days), "s" if days >= 2 else "")
def get_now(timezone: Optional[datetime.tzinfo]) -> datetime.datetime:
return datetime.datetime.now(timezone)
def next_sleep_interval() -> float:
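    # Seconds until the start of the next minute (current seconds zeroed, plus the one-minute wakeup interval).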
now = get_now(datetime.timezone.utc)
target = now.replace(second=0) + WAKEUP_INTERVAL
return (target - now).total_seconds()
def create_task(coro: Awaitable) -> asyncio.Task:
return asyncio.get_event_loop().create_task(coro)
def web_site_from_url(runner: web.AppRunner, url: str) -> web.BaseSite:
parsed = urlparse(url)
if parsed.scheme == "http":
assert parsed.hostname is not None
assert parsed.port is not None
return web.TCPSite(runner, parsed.hostname, parsed.port)
elif parsed.scheme == "unix":
return web.UnixSite(runner, parsed.path)
else:
logger.warning(
"Ignoring web listen url %s: scheme %r not supported",
url,
parsed.scheme,
)
raise ValueError(url)
class Cron:
def __init__(
self, config_arg: Optional[str], *, config_yaml: Optional[str] = None
) -> None:
# list of cron jobs we /want/ to run
self.cron_jobs = OrderedDict() # type: Dict[str, JobConfig]
# list of cron jobs already running
# name -> list of RunningJob
self.running_jobs = defaultdict(
list
) # type: Dict[str, List[RunningJob]]
self.config_arg = config_arg
if config_arg is not None:
self.update_config()
if config_yaml is not None:
# config_yaml is for unit testing
config, _, _ = parse_config_string(config_yaml, "")
self.cron_jobs = OrderedDict((job.name, job) for job in config)
self._wait_for_running_jobs_task = None # type: Optional[asyncio.Task]
self._stop_event = asyncio.Event()
self._jobs_running = asyncio.Event()
self.retry_state = {} # type: Dict[str, JobRetryState]
self.web_runner = None # type: Optional[web.AppRunner]
self.web_config = None # type: Optional[WebConfig]
async def run(self) -> None:
self._wait_for_running_jobs_task = create_task(
self._wait_for_running_jobs()
)
startup = True
while not self._stop_event.is_set():
try:
web_config = self.update_config()
await self.start_stop_web_app(web_config)
except ConfigError as err:
logger.error(
"Error in configuration file(s), so not updating "
"any of the config.:\n%s",
str(err),
)
except Exception: # pragma: nocover
logger.exception("please report this as a bug (1)")
await self.spawn_jobs(startup)
startup = False
sleep_interval = next_sleep_interval()
logger.debug("Will sleep for %.1f seconds", sleep_interval)
try:
await asyncio.wait_for(self._stop_event.wait(), sleep_interval)
except asyncio.TimeoutError:
pass
logger.info("Shutting down (after currently running jobs finish)...")
while self.retry_state:
cancel_all = [
self.cancel_job_retries(name) for name in self.retry_state
]
await asyncio.gather(*cancel_all)
await self._wait_for_running_jobs_task
if self.web_runner is not None:
logger.info("Stopping http server")
await self.web_runner.cleanup()
def signal_shutdown(self) -> None:
logger.debug("Signalling shutdown")
self._stop_event.set()
def update_config(self) -> Optional[WebConfig]:
if self.config_arg is None:
return None
config, web_config = parse_config(self.config_arg)
self.cron_jobs = OrderedDict((job.name, job) for job in config)
return web_config
async def _web_get_version(self, request: web.Request) -> web.Response:
return web.Response(text=yacron.version.version)
async def _web_get_status(self, request: web.Request) -> web.Response:
out = []
for name, job in self.cron_jobs.items():
running = self.running_jobs.get(name, None)
if running:
out.append(
{
"job": name,
"status": "running",
"pid": [
runjob.proc.pid
for runjob in running
if runjob.proc is not None
],
}
)
else:
crontab = job.schedule # type: Union[CronTab, str]
now = get_now(job.timezone)
out.append(
{
"job": name,
"status": "scheduled",
"scheduled_in": (
crontab.next(now=now, default_utc=job.utc)
if isinstance(crontab, CronTab)
else str(crontab)
),
}
)
if request.headers.get("Accept") == "application/json":
return web.json_response(out)
else:
lines = []
for jobstat in out: # type: Dict[str, Any]
if jobstat["status"] == "running":
status = "running (pid: {pid})".format(
pid=", ".join(str(pid) for pid in jobstat["pid"])
)
else:
status = "scheduled ({})".format(
(
jobstat["scheduled_in"]
if type(jobstat["scheduled_in"]) is str
else naturaltime(
jobstat["scheduled_in"], future=True
)
)
)
lines.append(
"{name}: {status}".format(
name=jobstat["job"], status=status
)
)
return web.Response(text="\n".join(lines))
async def _web_start_job(self, request: web.Request) -> web.Response:
name = request.match_info["name"]
try:
job = self.cron_jobs[name]
except KeyError:
raise web.HTTPNotFound()
await self.maybe_launch_job(job)
return web.Response()
async def start_stop_web_app(self, web_config: Optional[WebConfig]):
if self.web_runner is not None and (
web_config is None or web_config != self.web_config
):
# assert self.web_runner is not None
logger.info("Stopping http server")
await self.web_runner.cleanup()
self.web_runner = None
if (
web_config is not None
and web_config["listen"]
and self.web_runner is None
):
app = web.Application()
app.add_routes(
[
web.get("/version", self._web_get_version),
web.get("/status", self._web_get_status),
web.post("/jobs/{name}/start", self._web_start_job),
]
)
self.web_runner = web.AppRunner(app)
await self.web_runner.setup()
for addr in web_config["listen"]:
site = web_site_from_url(self.web_runner, addr)
logger.info("web: started listening on %s", addr)
try:
await site.start()
except ValueError:
pass
self.web_config = web_config
async def spawn_jobs(self, startup: bool) -> None:
for job in self.cron_jobs.values():
if self.job_should_run(startup, job):
await self.launch_scheduled_job(job)
@staticmethod
def job_should_run(startup: bool, job: JobConfig) -> bool:
if (
startup
and isinstance(job.schedule, str)
and job.schedule == "@reboot"
):
logger.debug(
"Job %s (%s) is scheduled for startup (@reboot)",
job.name,
job.schedule_unparsed,
)
return True
elif isinstance(job.schedule, CronTab):
crontab = job.schedule # type: CronTab
if crontab.test(get_now(job.timezone).replace(second=0)):
logger.debug(
"Job %s (%s) is scheduled for now",
job.name,
job.schedule_unparsed,
)
return True
else:
logger.debug(
"Job %s (%s) not scheduled for now",
job.name,
job.schedule_unparsed,
)
return False
else:
return False
async def launch_scheduled_job(self, job: JobConfig) -> None:
await self.cancel_job_retries(job.name)
assert job.name not in self.retry_state
retry = job.onFailure["retry"]
logger.debug("Job %s retry config: %s", job.name, retry)
if retry["maximumRetries"]:
retry_state = JobRetryState(
retry["initialDelay"],
retry["backoffMultiplier"],
retry["maximumDelay"],
)
self.retry_state[job.name] = retry_state
await self.maybe_launch_job(job)
async def maybe_launch_job(self, job: JobConfig) -> None:
if self.running_jobs[job.name]:
logger.warning(
"Job %s: still running and concurrencyPolicy is %s",
job.name,
job.concurrencyPolicy,
)
if job.concurrencyPolicy == "Allow":
pass
elif job.concurrencyPolicy == "Forbid":
return
elif job.concurrencyPolicy == "Replace":
for running_job in self.running_jobs[job.name]:
await running_job.cancel()
else:
raise AssertionError # pragma: no cover
logger.info("Starting job %s", job.name)
running_job = RunningJob(job, self.retry_state.get(job.name))
await running_job.start()
self.running_jobs[job.name].append(running_job)
logger.info("Job %s spawned", job.name)
self._jobs_running.set()
    # continually watch the running jobs and clean them up when they exit
async def _wait_for_running_jobs(self) -> None:
# job -> wait task
wait_tasks = {} # type: Dict[RunningJob, asyncio.Task]
while self.running_jobs or not self._stop_event.is_set():
try:
for jobs in self.running_jobs.values():
for job in jobs:
if job not in wait_tasks:
wait_tasks[job] = create_task(job.wait())
if not wait_tasks:
try:
await asyncio.wait_for(self._jobs_running.wait(), 1)
except asyncio.TimeoutError:
pass
continue
self._jobs_running.clear()
# wait for at least one task with timeout
done_tasks, _ = await asyncio.wait(
wait_tasks.values(),
timeout=1.0,
return_when=asyncio.FIRST_COMPLETED,
)
done_jobs = set()
for job, task in list(wait_tasks.items()):
if task in done_tasks:
done_jobs.add(job)
for job in done_jobs:
task = wait_tasks.pop(job)
try:
task.result()
except Exception: # pragma: no cover
logger.exception("please report this as a bug (2)")
jobs_list = self.running_jobs[job.config.name]
jobs_list.remove(job)
if not jobs_list:
del self.running_jobs[job.config.name]
fail_reason = job.fail_reason
logger.info(
"Job %s exit code %s; has stdout: %s, "
"has stderr: %s; fail_reason: %r",
job.config.name,
job.retcode,
str(bool(job.stdout)).lower(),
str(bool(job.stderr)).lower(),
fail_reason,
)
if fail_reason is not None:
await self.handle_job_failure(job)
else:
await self.handle_job_success(job)
except asyncio.CancelledError:
raise
except Exception: # pragma: no cover
logger.exception("please report this as a bug (3)")
await asyncio.sleep(1)
async def handle_job_failure(self, job: RunningJob) -> None:
if self._stop_event.is_set():
return
if job.stdout:
logger.info(
"Job %s STDOUT:\n%s", job.config.name, job.stdout.rstrip()
)
if job.stderr:
logger.info(
"Job %s STDERR:\n%s", job.config.name, job.stderr.rstrip()
)
await job.report_failure()
# Handle retries...
state = job.retry_state
if state is None or state.cancelled:
await job.report_permanent_failure()
return
logger.debug(
"Job %s has been retried %i times", job.config.name, state.count
)
if state.task is not None:
if state.task.done():
await state.task
else:
state.task.cancel()
retry = job.config.onFailure["retry"]
if (
state.count >= retry["maximumRetries"]
and retry["maximumRetries"] != -1
):
await self.cancel_job_retries(job.config.name)
await job.report_permanent_failure()
else:
retry_delay = state.next_delay()
state.task = create_task(
self.schedule_retry_job(
job.config.name, retry_delay, state.count
)
)
async def schedule_retry_job(
self, job_name: str, delay: float, retry_num: int
) -> None:
logger.info(
"Cron job %s scheduled to be retried (#%i) " "in %.1f seconds",
job_name,
retry_num,
delay,
)
await asyncio.sleep(delay)
try:
job = self.cron_jobs[job_name]
except KeyError:
logger.warning(
"Cron job %s was scheduled for retry, but "
"disappeared from the configuration",
job_name,
            )
            return
        await self.maybe_launch_job(job)
async def handle_job_success(self, job: RunningJob) -> None:
await self.cancel_job_retries(job.config.name)
await job.report_success()
async def cancel_job_retries(self, name: str) -> None:
try:
state = self.retry_state.pop(name)
except KeyError:
return
state.cancelled = True
if state.task is not None:
if state.task.done():
await state.task
else:
state.task.cancel()
| gjcarneiro/yacron | yacron/cron.py | Python | mit | 16,835 |
import pytest
@pytest.fixture
def genetic_modification(testapp, lab, award):
item = {
'award': award['@id'],
'lab': lab['@id'],
'modified_site_by_coordinates': {
'assembly': 'GRCh38',
'chromosome': '11',
'start': 20000,
'end': 21000
},
'purpose': 'repression',
'category': 'deletion',
'method': 'CRISPR',
'zygosity': 'homozygous'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_RNAi(testapp, lab, award):
item = {
'award': award['@id'],
'lab': lab['@id'],
'modified_site_by_coordinates': {
'assembly': 'GRCh38',
'chromosome': '11',
'start': 20000,
'end': 21000
},
'purpose': 'repression',
'category': 'deletion',
'method': 'RNAi'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_source(testapp, lab, award, source, gene):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'introduced_gene': gene['@id'],
'purpose': 'expression',
'method': 'CRISPR',
'reagents': [
{
'source': source['@id'],
'identifier': 'sigma:ABC123'
}
]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def crispr_deletion(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'CRISPR'
}
@pytest.fixture
def crispr_deletion_1(testapp, lab, award, target):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'CRISPR',
'modified_site_by_target_id': target['@id'],
'guide_rna_sequences': ['ACCGGAGA']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def tale_deletion(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'TALEN',
'zygosity': 'heterozygous'
}
@pytest.fixture
def crispr_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'CRISPR'
}
@pytest.fixture
def bombardment_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['bombardment']
}
@pytest.fixture
def recomb_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'site-specific recombination'
}
@pytest.fixture
def transfection_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['stable transfection']
}
@pytest.fixture
def crispri(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'CRISPR'
}
@pytest.fixture
def rnai(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'RNAi'
}
@pytest.fixture
def mutagen(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'mutagenesis',
'purpose': 'repression',
'method': 'mutagen treatment'
}
@pytest.fixture
def tale_replacement(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'replacement',
'purpose': 'characterization',
'method': 'TALEN',
'zygosity': 'heterozygous'
}
@pytest.fixture
def mpra(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transduction']
}
@pytest.fixture
def starr_seq(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'episome',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transient transfection']
}
@pytest.fixture
def introduced_elements(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'episome',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transient transfection'],
'introduced_elements': 'genomic DNA regions'
}
@pytest.fixture
def crispr_tag_1(testapp, lab, award, ctcf):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'CRISPR',
'modified_site_by_gene_id': ctcf['@id'],
'introduced_tags': [{'name': 'mAID-mClover', 'location': 'C-terminal'}]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def mpra_1(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transduction'],
'introduced_elements': 'synthesized DNA',
'modified_site_nonspecific': 'random'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def recomb_tag_1(testapp, lab, award, target, treatment_5, document):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'site-specific recombination',
'modified_site_by_target_id': target['@id'],
'modified_site_nonspecific': 'random',
'treatments': [treatment_5['@id']],
'documents': [document['@id']],
'introduced_tags': [{'name': 'eGFP', 'location': 'C-terminal'}]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def rnai_1(testapp, lab, award, source, target):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'RNAi',
'reagents': [{'source': source['@id'], 'identifier': 'addgene:12345'}],
'rnai_sequences': ['ATTACG'],
'modified_site_by_target_id': target['@id']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_1(lab, award):
return {
'modification_type': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'modifiction_description': 'some description'
}
@pytest.fixture
def genetic_modification_2(lab, award):
return {
'modification_type': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'modification_description': 'some description',
'modification_zygocity': 'homozygous',
'modification_purpose': 'tagging',
'modification_treatments': [],
'modification_genome_coordinates': [{
'chromosome': '11',
'start': 5309435,
'end': 5309451
}]
}
@pytest.fixture
def crispr_gm(lab, award, source):
return {
'lab': lab['uuid'],
'award': award['uuid'],
'source': source['uuid'],
'guide_rna_sequences': [
"ACA",
"GCG"
],
'insert_sequence': 'TCGA',
'aliases': ['encode:crispr_technique1'],
'@type': ['Crispr', 'ModificationTechnique', 'Item'],
'@id': '/crisprs/79c1ec08-c878-4419-8dba-66aa4eca156b/',
'uuid': '79c1ec08-c878-4419-8dba-66aa4eca156b'
}
@pytest.fixture
def genetic_modification_5(lab, award, crispr_gm):
return {
'modification_type': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
'zygosity': 'homozygous',
'treatments': [],
'source': 'sigma',
'product_id': '12345',
'modification_techniques': [crispr_gm],
'modified_site': [{
'assembly': 'GRCh38',
'chromosome': '11',
'start': 5309435,
'end': 5309451
}]
}
@pytest.fixture
def genetic_modification_6(lab, award, crispr_gm, source):
return {
'purpose': 'validation',
'category': 'deeltion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
"method": "CRISPR",
"modified_site_by_target_id": "/targets/FLAG-ZBTB43-human/",
"reagents": [
{
"identifier": "placeholder_id",
"source": source['uuid']
}
]
}
@pytest.fixture
def genetic_modification_7_invalid_reagent(lab, award, crispr_gm):
return {
'purpose': 'characterization',
'category': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
"method": "CRISPR",
"modified_site_by_target_id": "/targets/FLAG-ZBTB43-human/",
"reagents": [
{
"identifier": "placeholder_id",
"source": "/sources/sigma/"
}
]
}
@pytest.fixture
def genetic_modification_7_valid_reagent(lab, award, crispr_gm):
return {
'purpose': 'characterization',
'category': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
"method": "CRISPR",
"modified_site_by_target_id": "/targets/FLAG-ZBTB43-human/",
"reagents": [
{
"identifier": "ABC123",
"source": "/sources/sigma/"
}
]
}
@pytest.fixture
def genetic_modification_7_addgene_source(testapp):
item = {
'name': 'addgene',
'title': 'Addgene',
'status': 'released'
}
return testapp.post_json('/source', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_7_multiple_matched_identifiers(lab, award, crispr_gm):
return {
'purpose': 'characterization',
'category': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
"method": "CRISPR",
"modified_site_by_target_id": "/targets/FLAG-ZBTB43-human/",
"reagents": [
{
"identifier": "12345",
"source": "/sources/addgene/"
}
]
}
@pytest.fixture
def genetic_modification_7_multiple_reagents(lab, award, crispr_gm):
return {
'purpose': 'characterization',
'category': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'description': 'blah blah description blah',
"method": "CRISPR",
"modified_site_by_target_id": "/targets/FLAG-ZBTB43-human/",
"reagents": [
{
"identifier": "12345",
"source": "/sources/addgene/",
"url": "http://www.addgene.org"
},
{
"identifier": "67890",
"source": "/sources/addgene/",
"url": "http://www.addgene.org"
}
]
}
@pytest.fixture
def genetic_modification_8(lab, award):
return {
'purpose': 'analysis',
'category': 'interference',
'award': award['uuid'],
'lab': lab['uuid'],
"method": "CRISPR",
}
@pytest.fixture
def construct_genetic_modification(
testapp,
lab,
award,
document,
target_ATF5_genes,
target_promoter):
item = {
'award': award['@id'],
'documents': [document['@id']],
'lab': lab['@id'],
'category': 'insertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['stable transfection'],
'introduced_tags': [{'name':'eGFP', 'location': 'C-terminal', 'promoter_used': target_promoter['@id']}],
'modified_site_by_target_id': target_ATF5_genes['@id']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def construct_genetic_modification_N(
testapp,
lab,
award,
document,
target):
item = {
'award': award['@id'],
'documents': [document['@id']],
'lab': lab['@id'],
'category': 'insertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['stable transfection'],
'introduced_tags': [{'name':'eGFP', 'location': 'N-terminal'}],
'modified_site_by_target_id': target['@id']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def interference_genetic_modification(
testapp,
lab,
award,
document,
target):
item = {
'award': award['@id'],
'documents': [document['@id']],
'lab': lab['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'RNAi',
'modified_site_by_target_id': target['@id']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def crispr_knockout(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'knockout',
'purpose': 'characterization',
'method': 'CRISPR'
}
@pytest.fixture
def recombination_knockout(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'knockout',
'purpose': 'repression',
'method': 'site-specific recombination',
'modified_site_by_coordinates': {
"assembly": "GRCh38",
"chromosome": "11",
"start": 60000,
"end": 62000
}
}
@pytest.fixture
def characterization_insertion_transfection(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['stable transfection'],
'modified_site_nonspecific': 'random',
'introduced_elements': 'synthesized DNA'
}
@pytest.fixture
def characterization_insertion_CRISPR(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'method': 'CRISPR',
'modified_site_nonspecific': 'random',
'introduced_elements': 'synthesized DNA'
}
@pytest.fixture
def disruption_genetic_modification(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'CRISPR cutting',
'purpose': 'characterization',
'method': 'CRISPR'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def activation_genetic_modification(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'CRISPRa',
'purpose': 'characterization',
'method': 'CRISPR'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def binding_genetic_modification(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'CRISPR dCas',
'purpose': 'characterization',
'method': 'CRISPR'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def HR_knockout(lab, award, target):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'knockout',
'purpose': 'repression',
'method': 'homologous recombination',
'modified_site_by_target_id': target['@id']
}
@pytest.fixture
def CRISPR_introduction(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'expression',
'nucleic_acid_delivery_method': ['transient transfection']
}
@pytest.fixture
def genetic_modification_9(lab, award, human_donor_1):
return {
'lab': lab['@id'],
'award': award['@id'],
'donor': human_donor_1['@id'],
'category': 'insertion',
'purpose': 'expression',
'method': 'transient transfection'
}
@pytest.fixture
def transgene_insertion(testapp, lab, award, ctcf):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'in vivo enhancer characterization',
'nucleic_acid_delivery_method': ['mouse pronuclear microinjection'],
'modified_site_by_gene_id': ctcf['@id'],
'introduced_sequence': 'ATCGTA'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def guides_transduction_GM(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'expression',
'nucleic_acid_delivery_method': ['transduction'],
'introduced_elements': 'gRNAs and CRISPR machinery',
'MOI': 'high',
'guide_type': 'sgRNA'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_10(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'expression',
'nucleic_acid_delivery_method': ['transduction'],
'introduced_elements': 'gRNAs and CRISPR machinery',
}
@pytest.fixture
def genetic_modification_11(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'disruption',
'purpose': 'characterization',
'method': 'CRISPR'
}
@pytest.fixture
def transgene_insertion_2(testapp, lab, award, ctcf):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'transgene insertion',
'purpose': 'in vivo enhancer characterization',
'nucleic_acid_delivery_method': ['mouse pronuclear microinjection'],
'modified_site_by_gene_id': ctcf['@id'],
'introduced_sequence': 'ATCGTA'
}
@pytest.fixture
def activation_genetic_modification_2(testapp, lab, award):
return{
'lab': lab['@id'],
'award': award['@id'],
'category': 'activation',
'purpose': 'characterization',
'method': 'CRISPR'
}
@pytest.fixture
def binding_genetic_modification_2(testapp, lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'binding',
'purpose': 'characterization',
'method': 'CRISPR'
}
| ENCODE-DCC/encoded | src/encoded/tests/fixtures/schemas/genetic_modification.py | Python | mit | 19,673 |
from slm_lab.env.vec_env import make_gym_venv
import numpy as np
import pytest
@pytest.mark.parametrize('name,state_shape,reward_scale', [
('PongNoFrameskip-v4', (1, 84, 84), 'sign'),
('LunarLander-v2', (8,), None),
('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_venv_nostack(name, num_envs, state_shape, reward_scale):
seed = 0
frame_op = None
frame_op_len = None
venv = make_gym_venv(name, num_envs, seed, frame_op=frame_op, frame_op_len=frame_op_len, reward_scale=reward_scale)
venv.reset()
for i in range(5):
state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)
assert isinstance(state, np.ndarray)
assert state.shape == (num_envs,) + state_shape
assert isinstance(reward, np.ndarray)
assert reward.shape == (num_envs,)
assert isinstance(done, np.ndarray)
assert done.shape == (num_envs,)
assert len(info) == num_envs
venv.close()
@pytest.mark.parametrize('name,state_shape, reward_scale', [
('PongNoFrameskip-v4', (1, 84, 84), 'sign'),
('LunarLander-v2', (8,), None),
('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_concat(name, num_envs, state_shape, reward_scale):
seed = 0
frame_op = 'concat' # used for image, or for concat vector
frame_op_len = 4
venv = make_gym_venv(name, num_envs, seed, frame_op=frame_op, frame_op_len=frame_op_len, reward_scale=reward_scale)
venv.reset()
for i in range(5):
state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)
assert isinstance(state, np.ndarray)
stack_shape = (num_envs, frame_op_len * state_shape[0],) + state_shape[1:]
assert state.shape == stack_shape
assert isinstance(reward, np.ndarray)
assert reward.shape == (num_envs,)
assert isinstance(done, np.ndarray)
assert done.shape == (num_envs,)
assert len(info) == num_envs
venv.close()
@pytest.mark.skip(reason='Not implemented yet')
@pytest.mark.parametrize('name,state_shape,reward_scale', [
('LunarLander-v2', (8,), None),
('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_stack(name, num_envs, state_shape, reward_scale):
seed = 0
frame_op = 'stack' # used for rnn
frame_op_len = 4
venv = make_gym_venv(name, num_envs, seed, frame_op=frame_op, frame_op_len=frame_op_len, reward_scale=reward_scale)
venv.reset()
for i in range(5):
state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)
assert isinstance(state, np.ndarray)
stack_shape = (num_envs, frame_op_len,) + state_shape
assert state.shape == stack_shape
assert isinstance(reward, np.ndarray)
assert reward.shape == (num_envs,)
assert isinstance(done, np.ndarray)
assert done.shape == (num_envs,)
assert len(info) == num_envs
venv.close()
@pytest.mark.parametrize('name,state_shape,image_downsize', [
('PongNoFrameskip-v4', (1, 84, 84), (84, 84)),
('PongNoFrameskip-v4', (1, 64, 64), (64, 64)),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_venv_downsize(name, num_envs, state_shape, image_downsize):
seed = 0
frame_op = None
frame_op_len = None
venv = make_gym_venv(name, num_envs, seed, frame_op=frame_op, frame_op_len=frame_op_len, image_downsize=image_downsize)
venv.reset()
for i in range(5):
state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)
assert isinstance(state, np.ndarray)
assert state.shape == (num_envs,) + state_shape
assert isinstance(reward, np.ndarray)
assert reward.shape == (num_envs,)
assert isinstance(done, np.ndarray)
assert done.shape == (num_envs,)
assert len(info) == num_envs
venv.close()
| kengz/Unity-Lab | test/env/test_vec_env.py | Python | mit | 3,861 |
"""Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
"""Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
train_ids = np.concatenate((train, len(K)/2+train))
test_ids = np.concatenate((test, len(K)/2+test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
y_perm = np.random.randint(0, 2, len(y)/2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
yp = np.random.randint(0, 2, len(y)/2)
yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
y_perm = np.arange(len(y))
for i in range(len(y)/2):
if yp[i] == 1:
y_perm[i] = len(y)/2+i
y_perm[len(y)/2+i] = i
return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
cv: Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
p_value = max(1.0 / iterations, (acc_null > acc).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
acc_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose)
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name)
| emanuele/jstsp2015 | classif_and_ktst.py | Python | mit | 9,044 |
from collections import namedtuple
Resolution = namedtuple('Resolution', ['x', 'y'])
class Resolutions(object):
resolutions = [
(1920, 1200),
(1920, 1080),
(1680, 1050),
(1440, 900),
(1360, 768),
(1280, 800),
(1024, 640)
]
@classmethod
def parse(self, x, y):
if (x,y) not in self.resolutions:
resolutions = ', '.join(['%sx%s' % (a, b) for a,b in self.resolutions])
raise Exception('Resolution %s x %s not supported. Available resolutions: %s' % (x,y, resolutions) )
return Resolution(x, y)
class Color(object):
gray = (0.15, 0.15, 0.13, 1.0)
black = (0.0, 0.0, 0.0, 1.0)
white = (1.0, 1.0, 1.0, 1.0)
red = (1.0, 0.2, 0.0, 1.0)
orange = (1.0, 0.4, 0.0, 1.0)
yellow = (1.0, 0.9, 0.0, 1.0)
light_green = (0.4, 1.0, 0.0, 1.0)
green = (0.0, 1.0, 0.2, 1.0)
cyan = (0.0, 1.0, 0.4, 1.0)
light_blue = (0.0, 0.6, 1.0, 1.0)
blue = (0.0, 0.2, 1.0, 1.0)
purple = (0.4, 0.0, 1.0, 1.0)
pink = (1.0, 0.0, 0.8, 1.0)
@classmethod
def __colors(self):
return [key for key in self.__dict__.keys() if not key.startswith('_') and key != 'named']
@classmethod
def named(self, name):
if not hasattr(self, name):
colors = ', '.join(self.__colors())
raise Exception('Unknown color %s. Available colors are: %s' % (name, colors))
return getattr(self, name)
def try_parse(value):
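    # Parse config values: integers first, then 'true'/'false' to booleans, otherwise keep the raw string.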
try: return int(value)
except: return { 'true': True, 'false': False }.get(value.lower(), value)
def read_config():
with open('config.cfg', 'r') as cfg_file:
lines = cfg_file.readlines()
lines = [
line.strip().replace(' ', '').split('=')
for line in lines
if line.strip() and '=' in line
]
cfg = {key:try_parse(value) for key,value in lines}
return cfg
cfg = read_config()
NUM_CELLS = cfg.get('CELLS', 100)
RESOLUTION = Resolutions.parse(cfg.get('WINDOW_WIDTH', 1280), cfg.get('WINDOW_HEIGHT', 800))
limit = min(RESOLUTION)
PIXEL_PER_CELL = limit / NUM_CELLS
OFFSET_X = (RESOLUTION.x - (NUM_CELLS * PIXEL_PER_CELL)) / 2
OFFSET_Y = (RESOLUTION.y - (NUM_CELLS * PIXEL_PER_CELL)) / 2
SHOW_FULLSCREEN = cfg.get('FULLSCREEN', False)
SHOW_GRID = cfg.get('SHOW_GRID', True)
BACKGROUND_COLOR = Color.named(cfg.get('BACKGROUND_COLOR', 'black'))
GRID_BACKDROP_COLOR = Color.named(cfg.get('GRID_BACKDROP_COLOR', 'gray'))
GRID_LINE_COLOR = Color.named(cfg.get('GRID_LINE_COLOR', 'black'))
CELL_COLOR = Color.named(cfg.get('CELL_COLOR', 'green'))
CURSOR_COLOR = Color.named(cfg.get('CURSOR_COLOR', 'red'))
| cessor/gameoflife | config.py | Python | mit | 2,820 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.pw/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicPwStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.pw/status_available.txt"
host = "whois.nic.pw"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, [])
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
eq_(self.record.domain_id, None)
def test_expires_on(self):
eq_(self.record.expires_on, None)
def test_disclaimer(self):
eq_(self.record.disclaimer, None)
| huyphan/pyyawhois | test/record/parser/test_response_whois_nic_pw_status_available.py | Python | mit | 2,000 |
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
xor = len(nums)
for i, n in enumerate(nums):
xor ^= n
xor ^= i
return xor
inputs = [
[0],
[1],
[3,0,1],
[9,6,4,2,3,5,7,0,1]
]
s = Solution()
for i in inputs:
print s.missingNumber(i)
| daicang/Leetcode-solutions | 268-missing-number.py | Python | mit | 388 |
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minexponent", parent_name="choropleth.colorbar", **kwargs
):
super(MinexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/choropleth/colorbar/_minexponent.py | Python | mit | 477 |
import urllib
import urllib2
from bs4 import BeautifulSoup
textToSearch = 'gorillaz'
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
print 'https://www.youtube.com' + vid['href']
| arbakker/yt-daemon | search_yt.py | Python | mit | 380 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
DatastoreSummaryMaintenanceModeState = Enum(
'enteringMaintenance',
'inMaintenance',
'normal',
)
| xuru/pyvisdk | pyvisdk/enums/datastore_summary_maintenance_mode_state.py | Python | mit | 272 |
from math import floor
def score_syntax_errors(program_lines):
points = {')': 3, ']': 57, '}': 1197, '>': 25137}
s = 0
scores_auto = []
for line in program_lines:
corrupted, stack = corrupted_character(line)
if corrupted:
s += points[corrupted]
else:
scores_auto.append(score_autocomplete(stack))
return s, sorted(scores_auto)[floor(len(scores_auto)/2)]
def corrupted_character(inp):
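    # Return (first corrupting character or None, closing characters still needed to complete the line).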
stack = []
lookup = {'(': ')', '[': ']', '{': '}', '<': '>'}
lookup_close = {v: k for k, v in lookup.items()}
def stack_converter(st):
return [lookup[element] for element in st[::-1]]
for char in inp:
if char in lookup:
stack.append(char)
elif char in lookup_close:
expected = stack.pop()
if expected != lookup_close[char]:
return char, stack_converter(stack)
else:
print(f"INVALID {char}")
return None, stack_converter(stack)
def score_autocomplete(stack):
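    # Treat the completion characters as digits of a base-5 number, using the per-character point values.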
points_autocomplete = {')': 1, ']': 2, '}': 3, '>': 4}
s_auto = 0
for char in stack:
s_auto *= 5
s_auto += points_autocomplete[char]
return s_auto
def test_corrupted_character():
assert corrupted_character('{([(<{}[<>[]}>{[]{[(<()>')[0] == '}'
assert corrupted_character('[[<[([]))<([[{}[[()]]]')[0] == ')'
assert corrupted_character('[{[{({}]{}}([{[{{{}}([]')[0] == ']'
assert corrupted_character('[<(<(<(<{}))><([]([]()')[0] == ')'
assert corrupted_character('<{([([[(<>()){}]>(<<{{')[0] == '>'
def test_score_syntax_errors():
assert score_syntax_errors(open('input/10.test').read().splitlines()) == (26397, 288957)
def test_corrupted_character_stack():
assert corrupted_character('[({(<(())[]>[[{[]{<()<>>')[1] == ['}', '}', ']', ']', ')', '}', ')', ']']
def test_scoring_autocomplete():
assert score_autocomplete('}}]])})]') == 288957
assert score_autocomplete(')}>]})') == 5566
assert score_autocomplete('}}>}>))))') == 1480781
if __name__ == '__main__':
print(score_syntax_errors(open('input/10').read().splitlines()))
| matslindh/codingchallenges | adventofcode2021/day10.py | Python | mit | 2,156 |
#!/usr/bin/python
#
# Config file test app (together with test.cfg file)
#
import os, sys
sys.path.append("..")
import configfile
cfg = configfile.ConfigFile("test.cfg")
cfg.setCfgValue("name1", "value1")
cfg.setCfgValue("name2", "value2")
cfg.selectSection("user")
cfg.setCfgValue("username", "janis")
cfg.setCfgValue("acceptable_names", ["john", "janis"])
cfg.load()
print cfg.cfg.options("main")
print cfg.cfg.options("user")
print cfg.getCfgValue("username")
print type(cfg.getCfgValue("username"))
print cfg.getCfgValueAsList("acceptable_names")
print cfg.getCfgValueAsList("list_in_list")
cfg.selectSection("main")
print cfg.getCfgValueAsInt("a_number")
print type(cfg.getCfgValueAsInt("a_number"))
print cfg.getCfgValueAsBool("a_bool")
print type(cfg.getCfgValueAsBool("a_bool"))
cfg.filename = "test-mod.cfg"
cfg.selectSection("main")
cfg.setCfgValue("name1", "value1mod2")
cfg.setCfgValue("a_number", 14)
cfg.selectSection("user")
cfg.setCfgValue("acceptable_names", ["john", "janis", "ivan"])
cfg.setCfgValue("list_in_list2", ["[baz]", "[foo, bar]"])
cfg.setCfgValue("list_in_list3", ["first", "[second-one, second-third]"])
cfg.save()
| IECS/MansOS | tools/lib/tests/configtest.py | Python | mit | 1,154 |
#!/usr/bin/env python3
"""
My radio server application
For my eyes only
"""
#CREATE TABLE Radio(id integer primary key autoincrement, radio text, genre text, url text);
uuid='56ty66ba-6kld-9opb-ak29-0t7f5d294686'
# Import CherryPy global namespace
import os
import sys
import time
import socket
import cherrypy
import sqlite3 as lite
import re
import subprocess
from random import shuffle
# Globals
version = "4.2.1"
database = "database.db"
player = 'omxplayer'
header = '''<!DOCTYPE html>
<html lang="en">
<head>
<title>My Radio Web Server</title>
<meta name="generator" content="Vim">
<meta charset="UTF-8">
<link rel="icon" type="image/png" href="/static/css/icon.png" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<script src="/static/js/jquery-2.0.3.min.js"></script>
<script src="/static/js/bootstrap.min.js"></script>
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
<!-- Custom styles for this template -->
<link href="/static/css/sticky-footer.css" rel="stylesheet">
<style media="screen" type="text/css">
#radio-playing { display: none; }
#radio-table { display: none; }
#radio-volume { display: none; }
.jumbotron { padding: 10px 10px; }
</style>
<script type="text/javascript">
function fmodradio(rid) {
$.post('/m/', {id: rid},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function fdelradio(rid) {
var r = confirm("DELETING " + rid);
if (r != true) { return; }
$.post('/d/', {id: rid},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function fplayradio(rid) {
$.post('/p/', {id: rid},
function(data){
$("#radio-playing").html(data);
$("#radio-playing").show();
$("#radio-volume").hide();
},
"html"
);
}
function faddfav(i, g) {
$.post('/haddfav/', {id: i},
function(data){
$("#radio-playing").html(data);
$("#radio-playing").show();
$("#radio-volume").hide();
},
"html"
);
}
function fvolradio(updown) {
$.post('/v/', {vol: updown},
function(data){
$("#radio-volume").html(data);
$("#radio-volume").show();
},
"html"
);
}
function fkilradio() {
$.post('/k/',
function(data){
$("#radio-volume").html(data);
$("#radio-volume").show();
},
"html"
);
}
function fsearch(nam, gen) {
$.post('/g/', {name: nam, genre: gen},
function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function frandom(n, g) {
$.post('/g/', {name: n, genre: g, randomlist:'true'},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
// ----------------------------------------------------------
$(document).ready(function() {
$('body').on('click', '#button-modify', function(e) {
i = $("#idm").val()
n = $("#namem").val()
g = $("#genrem").val()
u = $("#urlm").val()
$.post("/f/", {id: i, name: n, genre: g, url: u})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$('#namem').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#genrem').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#urlm').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#button-search').click(function(e) {
n = $("#name").val()
g = $("#genre").val()
$.post("/g/", {name: n, genre: g})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$('#name').keyup(function(e){
if(e.keyCode == 13) {
$('#button-search').click();
}
});
$('#genre').keyup(function(e){
if(e.keyCode == 13) {
$('#button-search').click();
}
});
$("#button-insert").click(function(e) {
n = $("#namei").val()
g = $("#genrei").val()
u = $("#urli").val()
$.post("/i/", {name: n, genre: g, url: u})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$("#play-radio").click(function(e) {
i = $("#idp").val()
$.post("/p/", {id: i})
.done(function(data) {
$("#radio-playing").html(data);
$("#radio-playing").show();
});
e.preventDefault();
});
});
</script>
</head>
<body>
<div class="container-fluid">
<div class='jumbotron'>
<h2><a href="/">Radio</a>
<a href="#" onClick="fvolradio('down')"><span class="glyphicon glyphicon-volume-down"></span></a>
<a href="#" onClick="fvolradio('up')"><span class="glyphicon glyphicon-volume-up"></span></a>
<a href="#" onClick="fkilradio('up')"> <span class="glyphicon glyphicon-record"></span></a>
</h2>
<p>
<div class="form-group">
<input type="text" id="name" name="name" placeholder="radio to search">
<input type="text" id="genre" name="genre" placeholder="genre" >
<button id="button-search">Search</button>
</div>
</p>
<p>
<div class="form-group">
<input type="text" id="namei" name="name" placeholder="Radio Name">
<input type="text" id="genrei" name="genre" placeholder="genre">
<input type="text" id="urli" name="url" placeholder="http://radio.com/stream.mp3">
<button id="button-insert">Insert</button>
<p>
[
<a href="#" onClick="fsearch('', 'rai')"> rai </a>|
<a href="#" onClick="fsearch('','fav')"> fav </a> |
<a href="#" onClick="fsearch('','rmc')"> rmc </a> |
<a href="#" onClick="fsearch('','class')"> class </a> |
<a href="#" onClick="fsearch('','jazz')"> jazz </a> |
<a href="#" onClick="fsearch('','chill')"> chill </a> |
<a href="#" onClick="fsearch('','nl')"> nl </a> |
<a href="#" onClick="fsearch('','bbc')"> bbc </a> |
<a href="#" onClick="fsearch('','uk')"> uk </a> |
<a href="#" onClick="fsearch('','italy')"> italy </a>
]
</p>
</div>
<small><div id="radio-playing"> </div></small>
</br>
</div> <!-- Jumbotron END -->
<div id="radio-volume"> </div>
<div id="radio-table"> </div>
'''
footer = '''<p></div></body></html>'''
def isplayfile(pathname) :
if os.path.isfile(pathname) == False:
return False
ext = os.path.splitext(pathname)[1]
ext = ext.lower()
if (ext == '.mp2') : return True;
if (ext == '.mp3') : return True;
if (ext == '.ogg') : return True;
return False
# ------------------------ AUTHENTICATION --------------------------------
from cherrypy.lib import auth_basic
# Password is: webradio
users = {'admin':'29778a9bdb2253dd8650a13b8e685159'}
def validate_password(self, login, password):
if login in users :
if encrypt(password) == users[login] :
cherrypy.session['username'] = login
cherrypy.session['database'] = userdatabase(login)
return True
return False
def encrypt(pw):
from hashlib import md5
    return md5(pw.encode('utf-8')).hexdigest()
# ------------------------ CLASS --------------------------------
class Root:
@cherrypy.expose
def index(self):
html = header
(_1, _2, id) = getradio('0')
(radio, genre, url) = getradio(id)
if id != 0:
html += '''<h3><a href="#" onClick="fplayradio('%s')"> ''' % id
html += '''Play Last Radio %s <span class="glyphicon glyphicon-play"></span></a></h3>''' % radio
html += getfooter()
return html
@cherrypy.expose
def music(self, directory='/mnt/Media/Music/'):
html = header
count = 0
html += '''<table class="table table-condensed">'''
filelist = os.listdir(directory)
filelist.sort()
for f in filelist:
file = os.path.join(directory, f)
html += '''<tr>'''
if isplayfile(file):
html += '''<td ><a href="#" onClick="fplayradio('%s')">''' % file
html += '''Play %s<span class="glyphicon glyphicon-play"></span></a></td>''' % (file)
if os.path.isdir(file):
html += '''<td ><a href="/music?directory=%s">%s</a> </td>''' % (file, f)
html += '''</tr>'''
count += 1
html += '''</table>'''
html += '''</div> </div>'''
html += getfooter()
return html
@cherrypy.expose
def g(self, name="", genre="", randomlist='false'):
        list = searchradio(name, genre)
count = 0
# Randomlist
if randomlist == 'true' : shuffle(list)
listhtml = '''<table class="table table-condensed">'''
for id,radio,gen,url in list:
listhtml += '''<tr>'''
listhtml += '''<td width="200px"><a href="#" onClick="fmodradio('%s')" alt="%s">%s</a></td>''' % (id, url, radio)
listhtml += '''<td width="100px">%s</td>''' % gen
listhtml += '''<td ><a href="#" onClick="fplayradio('%s')">Play <span class="glyphicon glyphicon-play"></span></a></td>''' % (id)
listhtml += '''</tr>'''
count += 1
listhtml += '''</table>'''
listhtml += '''</div> </div>'''
html = ''
html += '''<div class="row"> <div class="col-md-8"> '''
if randomlist == 'false':
html += '''<h2><a href="#" onClick="frandom(name='%s', genre='%s', randomlist='true')">%d Results for '%s' + '%s'</a></h2>''' % (name, genre, count, name, genre)
else:
html += '''<h2><a href="#" onClick="fsearch(name='%s', genre='%s')">%d Random for '%s' + '%s'</a></h2>''' % (name, genre, count, name, genre)
html += listhtml
return html
@cherrypy.expose
def i(self, name="", genre="", url=""):
html = "<h2>Insert</h2>"
if name == "" or name == None :
html += "Error no name"
return html
if insert(name, genre, url) == False:
html += "Error db "
return html
html += '''<h3>This radio has been inserted</h3>'''
html += '''<p><table class="table table-condensed">'''
html += ''' <tr> '''
html += ''' <td>radio: <strong>%s</strong></td> ''' % name
html += ''' <td>genre: <strong>%s</strong></td> ''' % genre
html += ''' <td>url: <strong><a href="%s" target="_blank">%s</a></strong></td> ''' % (url, url)
html += ''' <td width="300px"><a href="#" onClick="fplayradio('%s')"> Play ''' % url
html += '''<span class="glyphicon glyphicon-play"></span></a></td>'''
html += ''' </tr> '''
html += '''</table>'''
return html
@cherrypy.expose
def d(self, id=""):
html = "<h2>Delete</h2>"
if id == "" or id == None :
html += "Error"
return html
if id == "0" :
html += "0 is reserved, sorry"
return html
#if delete(id) == False:
if nonexist(id) == False:
html += "Delete error in id" % id
html += getfooter()
return html
html += "Item %s set as non existent" % id
return html
@cherrypy.expose
def p(self, id):
html = ""
if id == "" or id == None :
html += "Error no radio id"
return html
if id == "0" :
html += "0 is reserved, sorry"
return html
(radio, genre, url) = playradio(id)
if url == '':
html += "Error in parameter %s" % url
return html
cherrypy.session['playing'] = id
html += '''<h3>Now Playing: '''
html += '''<a href="%s">%s</a>''' % (url, radio)
html += '''<a href="#" onClick="fplayradio('%s')">''' % id
html += '''<span class="glyphicon glyphicon-play"></span></a>'''
html += ''' <a href="#" onClick="fmodradio('%s')"><span class="glyphicon glyphicon-pencil"></span></a></small> ''' % id
html += '''<a href="#" onClick="fdelradio('%s')"><span class="glyphicon glyphicon-trash"></span></a> ''' % id
html += '''<a href="#" onClick="faddfav('%s')"><span class="glyphicon glyphicon-star"></span></a>''' % id
html += '''</h3>'''
return html
@cherrypy.expose
def v(self, vol=""):
html = ""
if vol == "" or vol == None :
html += "Error"
v = volume(vol)
html += "<h6>%s (%s) </h6>" % (v, vol)
return html
@cherrypy.expose
def m(self, id):
html = '''<h2>Modify</h2>'''
if id == "" or id == None :
html += "Error"
return html
if id == "0" :
html += "0 is reserved, sorry"
return html
(name, genre, url) = getradio(id)
html += '<h3>%s | %s | %s</h3>' % (name, genre, url)
html += '''<input type="hidden" id="idm" name="id" value="%s">''' % id
html += '''<input type="text" id="namem" name="name" value="%s">''' % name
html += '''genre: <input type="text" id="genrem" name="genre" value="%s"> ''' % genre
html += '''url: <input type="text" style="min-width: 280px" id="urlm" name="url" value="%s"> ''' % url
html += '''<button id="button-modify">Change</button>'''
html += '''<h3><a href="#" onClick="fdelradio('%s')">Delete? <span class="glyphicon glyphicon-trash"></span></a></h3>''' % id
html += '''<h3><a href="%s" target="_blank">Play in browser <span class="glyphicon glyphicon-music"></span></a>''' % url
return html
@cherrypy.expose
def f(self, id="", name="", genre="", url=""):
html = '''<h2>Modified</h2>'''
if id == "" or id == None :
html += "Error missing id"
return html
if id == "0" :
html += "0 is reserved, sorry"
return html
if modify(id, name, url, genre) == False:
html += "Error in DB"
return html
(name, genre, url) = getradio(id)
html += '''<p><table class="table table-condensed">'''
html += '''<tr>'''
html += '''<td width="100px"><a href="#" onClick="fmodradio('%s')">''' % id
html += '''Mod <span class="glyphicon glyphicon-pencil"></span></a></td>'''
html += '''<td width="200px">%s</td>''' % name
html += '''<td width="200px">%s</td>''' % genre
html += '''<td><a href="%s" target="_blank">%s</a></td>''' % (url, url)
html += '''<td width="300px"><a href="#" onClick="fplayradio('%s')">'''% url
html += '''Play <span class="glyphicon glyphicon-play"></span></a></td>'''
html += '''</tr>'''
html += '''</table>'''
return html
@cherrypy.expose
def haddfav(self, id=""):
if id == "" or id == None :
html += "Error missing id"
return html
if id == "0" :
html += "0 is reserved, sorry"
return html
(name, genre, url) = getradio(id)
if 'Fav' in genre:
genre = genre.replace(', Fav', '')
star = False
else:
genre += ', Fav'
star = True
if addgen(id, genre) == False:
return ''
(name, genre, url) = getradio(id)
cherrypy.session['playing'] = id
html = '<h3>Now Playing: '
html += '''<a href="%s">%s</a>''' % (url, name)
html += '''<a href="#" onClick="fplayradio('%s')">''' % url
html += '''<span class="glyphicon glyphicon-play"></span></a>'''
html += ''' <a href="#" onClick="fmodradio('%s')"><span class="glyphicon glyphicon-pencil"></span></a></small> ''' % id
html += '''<a href="#" onClick="fdelradio('%s')"><span class="glyphicon glyphicon-trash"></span></a> ''' % id
html += '''<a href="#" onClick="faddfav('%s')"><span class="glyphicon glyphicon-star"></span></a>''' % id
if star:
html += '''Starred'''
html += '''</h3>'''
return html
@cherrypy.expose
def k(self):
html = "<h2>Stopping</h2>"
killall()
return html
# ------------------------ DATABASE --------------------------------
def getfooter() :
global footer, version
db = cherrypy.session['database']
try:
con = lite.connect( db )
cur = con.cursor()
sql = "select radio, genre, url from Radio where id=0"
cur.execute(sql)
(radio, genre, url) = cur.fetchone()
except:
(radio, genre, url) = ('ERROR', sql, '')
con.close()
hostname = socket.gethostname()
f = '''<footer class="footer"> <div class="container">'''
f += '''<p class="text-muted">'''
f += '''Session id: %s - Session Database %s<br>''' % (cherrypy.session.id, cherrypy.session['database'])
f += '''Host: %s - Version: %s - Updated: %s // Last: %s''' % (hostname, version, genre, url)
f += '''</p>'''
f += '''</div></footer>'''
return f + footer
def updateversiondb(cur) :
db = cherrypy.session['database']
username = cherrypy.session['username']
dt = time.strftime("%Y-%m-%d %H:%M:%S")
try:
sql = "UPDATE Radio SET radio='%s', genre='%s' WHERE id = 0" % (hostname, dt)
cur.execute(sql)
except:
return
def delete(id) :
db = cherrypy.session['database']
try:
con = lite.connect( db )
cur = con.cursor()
sql = "DELETE from Radio WHERE id = '%s'" % (id)
cur.execute(sql)
ret = True
except:
ret = False
updateversiondb(cur)
con.commit()
con.close()
return ret
def nonexist(id) :
db = cherrypy.session['database']
sql = "UPDATE Radio set exist = 0 WHERE id = '%s'" % (id)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
ret = True
except:
ret = False
updateversiondb(cur)
con.commit()
con.close()
return ret
def insert(radio, genre, url) :
db = cherrypy.session['database']
sql = "INSERT INTO Radio (radio, genre, url, exist) VALUES('%s', '%s', '%s', 1)" % (radio, genre, url)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
ret = True
except:
ret = False
updateversiondb(cur)
con.commit()
con.close()
return ret
def modify(id, radio, url, genre) :
db = cherrypy.session['database']
sql = "UPDATE Radio SET radio='%s', url='%s', genre='%s', exist=1 WHERE id = %s" % (radio, url, genre, id)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
ret = True
except:
ret = False
updateversiondb(cur)
con.commit()
con.close()
return ret
def addgen(id, genre) :
db = cherrypy.session['database']
sql = "UPDATE Radio SET genre='%s' WHERE id = %s" % (genre, id)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
ret = True
except:
ret = False
updateversiondb(cur)
con.commit()
con.close()
return ret
def getradio(id) :
db = cherrypy.session['database']
if id.isdigit() :
sql = "select radio, genre, url from Radio where id=%s" % id
else:
sql = "select radio, genre, url from Radio where url=%s" % id
    try:
        con = lite.connect( db )
        cur = con.cursor()
        cur.execute(sql)
        rows = cur.fetchone()
        con.close()
    except:
        return ('Not Found', '', '')
    if rows == None:
        rows = ('Not Found', '', '')
return rows
def searchradio(radio, genre) :
db = cherrypy.session['database']
#o = 'order by radio'
o = ''
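    # In the %-formatted SQL below, %% escapes to a literal %, so the LIKE
    # patterns become '%<radio>%' and '%<genre>%' (substring matches).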
sql = "select id, radio, genre, url from Radio where exist > 0 and radio like '%%%s%%' and genre like '%%%s%%' and id > 0 %s" % (radio, genre, o)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
except:
return [(0, sql, o, genre)]
rows = cur.fetchall()
con.close()
return rows
def updatelastradio(url) :
db = cherrypy.session['database']
sql = "UPDATE Radio SET url='%s' WHERE id=0" % (url)
try:
con = lite.connect( db )
cur = con.cursor()
cur.execute(sql)
con.commit()
con.close()
except:
return
def userdatabase(user) :
db = database
if not os.path.isfile(db):
return None
return db
def getshort(code) :
maxl = 5
newcode = code.replace('http://', '')
if len(newcode) > maxl :
newcode = newcode[0:maxl]
return str(newcode)
def setplayer(p):
global player
player = p
def playradio(urlid):
global player
(radio, genre, url) = getradio(urlid)
status = 0
killall()
if player == 'mpg123':
command = "/usr/bin/mpg123 -q %s" % url
pidplayer = subprocess.Popen(command, shell=True).pid
if player == 'mplayer':
command = "/usr/bin/mplayer -really-quiet %s" % url
pidplayer = subprocess.Popen(command, shell=True).pid
if player == 'omxplayer':
# Process is in background
p = 'omxplayer'
subprocess.Popen([p, url])
updatelastradio(urlid)
return (radio, genre, urlid)
def killall():
global player
status = 0
if player == 'omxplayer':
control = "/usr/local/bin/omxcontrol"
status = subprocess.call([control, "stop"])
status = subprocess.call(["pkill", player])
return status
def volume(vol) :
global player
if player == 'omxplayer':
return volume_omxplayer(vol)
else:
return volume_alsa(vol)
def volume_alsa(vol):
# With ALSA on CHIP
if vol == 'up':
db = subprocess.check_output(["amixer set 'Power Amplifier' 5%+"], shell=True)
#db = os.system("amixer set 'Power Amplifier' 5%+")
if vol == 'down':
db = subprocess.check_output(["amixer set 'Power Amplifier' 5%-"], shell=True)
#db = os.system("amixer set 'Power Amplifier' 5%-")
i = db.rfind(':')
return db[i+1:]
def volume_omxplayer(vol) :
import math
control = "/usr/local/bin/omxcontrol"
if vol == 'up' :
db = subprocess.check_output([control, "volumeup"])
else :
db = subprocess.check_output([control, "volumedown"])
v = subprocess.check_output([control, "volume"])
i = v.rfind(':')
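    # omxcontrol reports a linear volume factor after the ':'; convert it to
    # decibels (10 * log10) for display.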
db = 10.0 * math.log(float(v[i+1:]), 10)
volstring = "%-2.2f dB" % db
return volstring
# ------------------------ SYSTEM --------------------------------
def writemypid(pidfile):
pid = str(os.getpid())
with open(pidfile, 'w') as f:
f.write(pid)
# Cherrypy Management
def error_page_404(status, message, traceback, version):
html = header
html += "%s<br>" % (status)
html += "%s" % (traceback)
html += getfooter()
return html
def error_page_401(status, message, traceback, version):
html = '''<!DOCTYPE html>
<html lang="en">
<head>
<title>My Radio Web Server</title>
<meta name="generator" content="Vim">
<meta charset="UTF-8">
</head>
<body>
'''
html += "<h1>%s</h1>" % (status)
html += "%s<br>" % (message)
return html
# Secure headers!
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
headers['Content-Security-Policy'] = "default-src='self'"
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--player', action="store", dest="player", default="mplayer")
parser.add_argument('--stage', action="store", dest="stage", default="production")
parser.add_argument('--database', action="store", dest="database", default="database.db")
parser.add_argument('--root', action="store", dest="root", default=".")
parser.add_argument('--pid', action="store", dest="pid", default="/tmp/8804.pid")
parser.add_argument('--port', action="store", dest="port", type=int, default=8804)
# get args
args = parser.parse_args()
# Where to start, what to get
root = os.path.abspath(args.root)
database = os.path.join(root, args.database)
os.chdir(root)
current_dir = os.path.dirname(os.path.abspath(__file__))
setplayer(args.player)
writemypid(args.pid)
settings = {'global': {'server.socket_host': "0.0.0.0",
'server.socket_port' : args.port,
'log.screen': True,
},
}
conf = {'/static': {'tools.staticdir.on': True,
'tools.staticdir.root': current_dir,
'tools.staticfile.filename': 'icon.png',
'tools.staticdir.dir': 'static'
},
'/': {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': validate_password,
'tools.secureheaders.on' : True,
'tools.sessions.on': True,
},
}
cherrypy.config.update(settings)
cherrypy.config.update({'error_page.404': error_page_404})
cherrypy.config.update({'error_page.401': error_page_401})
cherrypy.tools.secureheaders = cherrypy.Tool('before_finalize', secureheaders, priority=60)
# To make it ZERO CPU usage
#cherrypy.engine.timeout_monitor.unsubscribe()
#cherrypy.engine.autoreload.unsubscribe()
# Cherry insert pages
serverroot = Root()
# Start the CherryPy server.
cherrypy.quickstart(serverroot, config=conf)
| ernitron/radio-server | radio-server/server.py | Python | mit | 26,943 |
'''
Test cases for pyclbr.py
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import sys
from types import ClassType, FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Silence Py3k warning
import_module('commands', deprecated=True)
# This next line triggers an error on old versions of pyclbr.
from commands import getstatus
# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.
class PyclbrTest(TestCase):
def assertListEq(self, l1, l2, ignore):
''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
missing = (set(l1) ^ set(l2)) - set(ignore)
if missing:
print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore)
self.fail("%r missing" % missing.pop())
def assertHasattr(self, obj, attr, ignore):
''' succeed iff hasattr(obj,attr) or attr in ignore. '''
if attr in ignore: return
if not hasattr(obj, attr): print "???", attr
self.failUnless(hasattr(obj, attr),
'expected hasattr(%r, %r)' % (obj, attr))
def assertHaskey(self, obj, key, ignore):
''' succeed iff key in obj or key in ignore. '''
if key in ignore: return
if key not in obj:
print >>sys.stderr, "***", key
self.assertTrue(key in obj)
def assertEqualsOrIgnored(self, a, b, ignore):
''' succeed iff a == b or a in ignore or b in ignore '''
if a not in ignore and b not in ignore:
self.assertEqual(a, b)
def checkModule(self, moduleName, module=None, ignore=()):
''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
to the actual module object, module. Any identifiers in
ignore are ignored. If no module is provided, the appropriate
module is loaded with __import__.'''
if module is None:
# Import it.
# ('<silly>' is to work around an API silliness in __import__)
module = __import__(moduleName, globals(), {}, ['<silly>'])
dict = pyclbr.readmodule_ex(moduleName)
def ismethod(oclass, obj, name):
classdict = oclass.__dict__
if isinstance(obj, FunctionType):
if not isinstance(classdict[name], StaticMethodType):
return False
else:
if not isinstance(obj, MethodType):
return False
if obj.im_self is not None:
if (not isinstance(classdict[name], ClassMethodType) or
obj.im_self is not oclass):
return False
else:
if not isinstance(classdict[name], FunctionType):
return False
objname = obj.__name__
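            # A private method's __name__ keeps its unmangled form while the
            # class dict key is mangled to _ClassName__name, so apply the same
            # mangling before comparing.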
if objname.startswith("__") and not objname.endswith("__"):
objname = "_%s%s" % (obj.im_class.__name__, objname)
return objname == name
# Make sure the toplevel functions and classes are the same.
for name, value in dict.items():
if name in ignore:
continue
self.assertHasattr(module, name, ignore)
py_item = getattr(module, name)
if isinstance(value, pyclbr.Function):
self.assert_(isinstance(py_item, (FunctionType, BuiltinFunctionType)))
if py_item.__module__ != moduleName:
continue # skip functions that came from somewhere else
self.assertEquals(py_item.__module__, value.module)
else:
self.failUnless(isinstance(py_item, (ClassType, type)))
if py_item.__module__ != moduleName:
continue # skip classes that came from somewhere else
real_bases = [base.__name__ for base in py_item.__bases__]
pyclbr_bases = [ getattr(base, 'name', base)
for base in value.super ]
try:
self.assertListEq(real_bases, pyclbr_bases, ignore)
except:
print >>sys.stderr, "class=%s" % py_item
raise
actualMethods = []
for m in py_item.__dict__.keys():
if ismethod(py_item, getattr(py_item, m), m):
actualMethods.append(m)
foundMethods = []
for m in value.methods.keys():
if m[:2] == '__' and m[-2:] != '__':
foundMethods.append('_'+name+m)
else:
foundMethods.append(m)
try:
self.assertListEq(foundMethods, actualMethods, ignore)
self.assertEquals(py_item.__module__, value.module)
self.assertEqualsOrIgnored(py_item.__name__, value.name,
ignore)
# can't check file or lineno
except:
print >>sys.stderr, "class=%s" % py_item
raise
# Now check for missing stuff.
def defined_in(item, module):
if isinstance(item, ClassType):
return item.__module__ == module.__name__
if isinstance(item, FunctionType):
return item.func_globals is module.__dict__
return False
for name in dir(module):
item = getattr(module, name)
if isinstance(item, (ClassType, FunctionType)):
if defined_in(item, module):
self.assertHaskey(dict, name, ignore)
def test_easy(self):
self.checkModule('pyclbr')
self.checkModule('doctest')
# Silence Py3k warning
rfc822 = import_module('rfc822', deprecated=True)
self.checkModule('rfc822', rfc822)
self.checkModule('difflib')
def test_decorators(self):
# XXX: See comment in pyclbr_input.py for a test that would fail
# if it were not commented out.
#
self.checkModule('test.pyclbr_input')
def test_others(self):
cm = self.checkModule
# These were once about the 10 longest modules
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('cgi', ignore=('log',)) # set with = in module
cm('urllib', ignore=('_CFNumberToInt32',
'_CStringFromCFString',
'_CFSetup',
'getproxies_registry',
'proxy_bypass_registry',
'proxy_bypass_macosx_sysconf',
'open_https',
'getproxies_macosx_sysconf',
'getproxies_internetconfig',)) # not on all platforms
cm('pickle')
cm('aifc', ignore=('openfp',)) # set with = in module
cm('Cookie')
cm('sre_parse', ignore=('dump',)) # from sre_constants import *
cm('pdb')
cm('pydoc')
# Tests for modules inside packages
cm('email.parser')
cm('test.test_pyclbr')
def test_main():
run_unittest(PyclbrTest)
if __name__ == "__main__":
test_main()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_pyclbr.py | Python | mit | 7,874 |
from django.db import models
from django.core.urlresolvers import reverse
class Software(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('software_edit', kwargs={'pk': self.pk})
| htlcnn/pyrevitscripts | HTL.tab/Test.panel/Test.pushbutton/keyman/keyman/keys/models.py | Python | mit | 300 |
#!/usr/bin/python3
"""
This bot uploads text from djvu files onto pages in the "Page" namespace.
It is intended to be used for Wikisource.
The following parameters are supported:
-index:... name of the index page (without the Index: prefix)
-djvu:... path to the djvu file, it shall be:
- path to a file name
- dir where a djvu file name as index is located
optional, by default is current dir '.'
-pages:<start>-<end>,...<start>-<end>,<start>-<end>
Page range to upload;
optional, start=1, end=djvu file number of images.
Page ranges can be specified as:
A-B -> pages A until B
A- -> pages A until number of images
A -> just page A
-B -> pages 1 until B
This script is a :py:obj:`ConfigParserBot <pywikibot.bot.ConfigParserBot>`.
The following options can be set within a settings file which is scripts.ini
by default:
-summary: custom edit summary.
Use quotes if edit summary contains spaces.
-force overwrites existing text
optional, default False
-always do not bother asking to confirm any of the changes.
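
Example (hypothetical invocation, assuming an Index page "Example.djvu" and a
matching djvu file in the current directory):

    python pwb.py djvutext -index:Example.djvu -pages:1-5,10-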
"""
#
# (C) Pywikibot team, 2008-2022
#
# Distributed under the terms of the MIT license.
#
import os.path
from typing import Optional
import pywikibot
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
from pywikibot.exceptions import NoPageError
from pywikibot.proofreadpage import ProofreadPage
from pywikibot.tools.djvu import DjVuFile
class DjVuTextBot(SingleSiteBot):
"""
A bot that uploads text-layer from djvu files to Page:namespace.
Works only on sites with Proofread Page extension installed.
.. versionchanged:: 7.0
CheckerBot is a ConfigParserBot
"""
update_options = {
'force': False,
'summary': '',
}
def __init__(
self,
djvu,
index,
pages: Optional[tuple] = None,
**kwargs
) -> None:
"""
Initializer.
:param djvu: djvu from where to fetch the text layer
:type djvu: DjVuFile object
:param index: index page in the Index: namespace
:type index: Page object
:param pages: page interval to upload (start, end)
"""
super().__init__(**kwargs)
self._djvu = djvu
self._index = index
self._prefix = self._index.title(with_ns=False)
self._page_ns = self.site._proofread_page_ns.custom_name
if not pages:
self._pages = (1, self._djvu.number_of_images())
else:
self._pages = pages
# Get edit summary message if it's empty.
if not self.opt.summary:
self.opt.summary = i18n.twtranslate(self._index.site,
'djvutext-creating')
def page_number_gen(self):
"""Generate pages numbers from specified page intervals."""
last = 0
for start, end in sorted(self._pages):
start = max(last, start)
last = end + 1
yield from range(start, last)
@property
def generator(self):
"""Generate pages from specified page interval."""
for page_number in self.page_number_gen():
title = '{page_ns}:{prefix}/{number}'.format(
page_ns=self._page_ns,
prefix=self._prefix,
number=page_number)
page = ProofreadPage(self._index.site, title)
page.page_number = page_number # remember page number in djvu file
yield page
def treat(self, page) -> None:
"""Process one page."""
old_text = page.text
# Overwrite body of the page with content from djvu
page.body = self._djvu.get_page(page.page_number)
new_text = page.text
if page.exists() and not self.opt.force:
pywikibot.output(
'Page {} already exists, not adding!\n'
'Use -force option to overwrite the output page.'
.format(page))
else:
self.userPut(page, old_text, new_text, summary=self.opt.summary)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
index = None
djvu_path = '.' # default djvu file directory
pages = '1-'
options = {}
# Parse command line arguments.
local_args = pywikibot.handle_args(args)
for arg in local_args:
opt, _, value = arg.partition(':')
if opt == '-index':
index = value
elif opt == '-djvu':
djvu_path = value
elif opt == '-pages':
pages = value
elif opt == '-summary':
options['summary'] = value
elif opt in ('-force', '-always'):
options[opt[1:]] = True
else:
pywikibot.output('Unknown argument ' + arg)
# index is mandatory.
if not index:
pywikibot.bot.suggest_help(missing_parameters=['-index'])
return
# If djvu_path is not a file, build djvu_path from dir+index.
djvu_path = os.path.expanduser(djvu_path)
djvu_path = os.path.abspath(djvu_path)
if not os.path.exists(djvu_path):
pywikibot.error('No such file or directory: ' + djvu_path)
return
if os.path.isdir(djvu_path):
djvu_path = os.path.join(djvu_path, index)
# Check the djvu file exists and, if so, create the DjVuFile wrapper.
djvu = DjVuFile(djvu_path)
if not djvu.has_text():
pywikibot.error('No text layer in djvu file {}'.format(djvu.file))
return
# Parse pages param.
pages = pages.split(',')
for i, page_interval in enumerate(pages):
start, sep, end = page_interval.partition('-')
start = int(start or 1)
end = int(end or djvu.number_of_images()) if sep else start
pages[i] = (start, end)
site = pywikibot.Site()
if not site.has_extension('ProofreadPage'):
pywikibot.error('Site {} must have ProofreadPage extension.'
.format(site))
return
index_page = pywikibot.Page(site, index, ns=site.proofread_index_ns)
if not index_page.exists():
raise NoPageError(index)
pywikibot.output('uploading text from {} to {}'
.format(djvu.file, index_page.title(as_link=True)))
bot = DjVuTextBot(djvu, index_page, pages=pages, site=site, **options)
bot.run()
if __name__ == '__main__':
try:
main()
except Exception:
pywikibot.error('Fatal error:', exc_info=True)
| wikimedia/pywikibot-core | scripts/djvutext.py | Python | mit | 6,822 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
managed_instance_name: str,
database_name: str,
query_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"queryId": _SERIALIZER.url("query_id", query_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_query_request(
resource_group_name: str,
managed_instance_name: str,
database_name: str,
query_id: str,
subscription_id: str,
*,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
interval: Optional[Union[str, "_models.QueryTimeGrainType"]] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}/statistics')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"queryId": _SERIALIZER.url("query_id", query_id, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if start_time is not None:
query_parameters['startTime'] = _SERIALIZER.query("start_time", start_time, 'str')
if end_time is not None:
query_parameters['endTime'] = _SERIALIZER.query("end_time", end_time, 'str')
if interval is not None:
query_parameters['interval'] = _SERIALIZER.query("interval", interval, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ManagedDatabaseQueriesOperations(object):
"""ManagedDatabaseQueriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
query_id: str,
**kwargs: Any
) -> "_models.ManagedInstanceQuery":
"""Get query by query id.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the database.
:type database_name: str
:param query_id:
:type query_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceQuery, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceQuery
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceQuery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
query_id=query_id,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedInstanceQuery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}'} # type: ignore
@distributed_trace
def list_by_query(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
query_id: str,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
interval: Optional[Union[str, "_models.QueryTimeGrainType"]] = None,
**kwargs: Any
) -> Iterable["_models.ManagedInstanceQueryStatistics"]:
"""Get query execution statistics by query id.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the database.
:type database_name: str
:param query_id:
:type query_id: str
:param start_time: Start time for observed period.
:type start_time: str
:param end_time: End time for observed period.
:type end_time: str
:param interval: The time step to be used to summarize the metric values.
:type interval: str or ~azure.mgmt.sql.models.QueryTimeGrainType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedInstanceQueryStatistics or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.ManagedInstanceQueryStatistics]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceQueryStatistics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_query_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
query_id=query_id,
subscription_id=self._config.subscription_id,
start_time=start_time,
end_time=end_time,
interval=interval,
template_url=self.list_by_query.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_query_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
query_id=query_id,
subscription_id=self._config.subscription_id,
start_time=start_time,
end_time=end_time,
interval=interval,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedInstanceQueryStatistics", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}/statistics'} # type: ignore
| Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_managed_database_queries_operations.py | Python | mit | 12,909 |
import redis
import logging
import simplejson as json
import sys
from msgpack import Unpacker
from flask import Flask, request, render_template
from daemon import runner
from os.path import dirname, abspath
# add the shared settings file to namespace
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import settings
REDIS_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
@app.route("/")
def index():
return render_template('index.html'), 200
@app.route("/app_settings")
def app_settings():
app_settings = {'GRAPHITE_HOST': settings.GRAPHITE_HOST,
'OCULUS_HOST': settings.OCULUS_HOST,
'FULL_NAMESPACE': settings.FULL_NAMESPACE,
}
resp = json.dumps(app_settings)
return resp, 200
@app.route("/api", methods=['GET'])
def data():
metric = request.args.get('metric', None)
try:
raw_series = REDIS_CONN.get(metric)
if not raw_series:
resp = json.dumps({'results': 'Error: No metric by that name'})
return resp, 404
else:
unpacker = Unpacker(use_list = False)
unpacker.feed(raw_series)
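            # Each unpacked datapoint is expected to be a (timestamp, value, ...)
            # sequence; only the first two fields are returned as JSON.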
timeseries = [item[:2] for item in unpacker]
resp = json.dumps({'results': timeseries})
return resp, 200
except Exception as e:
error = "Error: " + e
resp = json.dumps({'results': error})
return resp, 500
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = settings.LOG_PATH + '/webapp.log'
self.stderr_path = settings.LOG_PATH + '/webapp.log'
self.pidfile_path = settings.PID_PATH + '/webapp.pid'
self.pidfile_timeout = 5
def run(self):
logger.info('starting webapp')
logger.info('hosted at %s' % settings.WEBAPP_IP)
logger.info('running on port %d' % settings.WEBAPP_PORT)
app.run(settings.WEBAPP_IP, settings.WEBAPP_PORT)
if __name__ == "__main__":
"""
Start the server
"""
webapp = App()
logger = logging.getLogger("AppLog")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.FileHandler(settings.LOG_PATH + '/webapp.log')
handler.setFormatter(formatter)
logger.addHandler(handler)
if len(sys.argv) > 1 and sys.argv[1] == 'run':
webapp.run()
else:
daemon_runner = runner.DaemonRunner(webapp)
daemon_runner.daemon_context.files_preserve = [handler.stream]
daemon_runner.do_action()
| MyNameIsMeerkat/skyline | src/webapp/webapp.py | Python | mit | 2,673 |
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img1 = Image.open('multipage.tif')
# The following approach seems to be having issue with the
# current TIFF format data
print('The size of each frame is:')
print(img1.size)
# Plots first frame
print('Frame 1')
fig1 = plt.figure(1)
img1.seek(0)
# for i in range(250):
# pixA11 = img1.getpixel((1,i))
# print(pixA11)
f1 = list(img1.getdata())
print(f1[1000])
plt.imshow(img1)
fig1.show()
input()
# Plots eleventh frame
# print('Frame 11')
# fig2 = plt.figure(2)
# img1.seek(10)
# # for i in range(250):
# # pixB11 = img1.getpixel((1,i))
# # print(pixB11)
# f2 = list(img1.getdata())
# print(f2[10000])
# plt.imshow(img1)
# fig2.show()
# input()
# Create a new image
fig3 = plt.figure(3)
imgAvg = Image.new(img1.mode, img1.size)
print(img1.mode)
print(img1.size)
fAvg = list()
pix = imgAvg.load()
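# Note: reading of frame 11 (f2) is commented out above, so the loop below
# currently averages frame 1 with itself; substitute f2 for the second term
# to average two different frames.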
for i in range(512):
for j in range(512):
pixVal = (f1[i*512+j] + f1[i*512+j]) / 2
# fAvg.append(pixVal)
fAvg.insert(i*512+j,pixVal)
imgAvg.putdata(fAvg)
imgAvg.save('avg.tiff')
plt.imshow(imgAvg)
fig3.show()
print('Average')
# The following is necessary to keep the above figures 'alive'
input()
# data = random.random((256, 256))
# img1 = Image.fromarray(data)
# img1.save('test.tiff')
| johnrocamora/ImagePy | max_tiff.py | Python | mit | 1,346 |
class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
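        # dp[i + 1][j] is True iff the first i + 1 pattern tokens match the
        # first j characters of s.  A starred token may match zero characters
        # (inherit dp[i][j]) or extend a match by one character
        # (dp[i + 1][j - 1]) when it matches s[j - 1]; a plain token must
        # consume exactly one matching character (dp[i][j - 1]).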
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
| SF-Zhou/LeetCode.Solutions | solutions/regular_expression_matching.py | Python | mit | 1,032 |
from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommend =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
| HellerCommaA/flask-material-lite | sample_application/__init__.py | Python | mit | 2,763 |
#!/usr/bin/env python3
from __future__ import print_function, division
import numpy as np
from sht.grids import standard_grid, get_cartesian_grid
def test_grids():
L = 10
thetas, phis = standard_grid(L)
# Can't really test much here
assert thetas.size == L
assert phis.size == L**2
grid = get_cartesian_grid(thetas, phis)
assert grid.shape == (L**2, 3)
| praveenv253/sht | tests/test_grids.py | Python | mit | 386 |
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
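    # Colour code: nodes from the request are blue and translucent, nodes
    # ranked above 7 red, mid-ranked (3 < rank < 7) yellow, all others green.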
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
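    # cache maps request nodes to the (x, y) they were drawn at on the upper
    # plane so request-to-request edges can be drawn afterwards; positions are
    # jittered when two nodes would otherwise overlap.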
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
| tmetsch/graph_stitcher | stitcher/vis.py | Python | mit | 5,618 |
# -*- coding: utf-8 -*-
# django-simple-help
# simple_help/admin.py
from __future__ import unicode_literals
from django.contrib import admin
try: # add modeltranslation
from modeltranslation.translator import translator
from modeltranslation.admin import TabbedDjangoJqueryTranslationAdmin
except ImportError:
pass
from simple_help.models import PageHelp
from simple_help.forms import PageHelpAdminForm
from simple_help.utils import modeltranslation
try:
from simple_help.translation import PageHelpTranslationOptions
except ImportError:
pass
__all__ = [
"PageHelpAdmin",
]
class PageHelpAdmin(TabbedDjangoJqueryTranslationAdmin if modeltranslation() else admin.ModelAdmin):
"""
Customize PageHelp model for admin area.
"""
list_display = ["page", "title", ]
search_fields = ["title", ]
list_filter = ["page", ]
form = PageHelpAdminForm
if modeltranslation():
# registering translation options
translator.register(PageHelp, PageHelpTranslationOptions)
# registering admin custom classes
admin.site.register(PageHelp, PageHelpAdmin)
| DCOD-OpenSource/django-simple-help | simple_help/admin.py | Python | mit | 1,108 |
# -*- coding: utf-8 -*-
import sys
from io import BytesIO
import argparse
from PIL import Image
from .api import crop_resize
parser = argparse.ArgumentParser(
description='crop and resize an image without aspect ratio distortion.')
parser.add_argument('image')
parser.add_argument('-w', '-W', '--width', metavar='<width>', type=int,
help='desired width of image in pixels')
parser.add_argument('-H', '--height', metavar='<height>', type=int,
help='desired height of image in pixels')
parser.add_argument('-f', '--force', action='store_true',
help='whether to scale up for smaller images')
parser.add_argument('-d', '--display', action='store_true', default=False,
help='display the new image (don\'t write to file)')
parser.add_argument('-o', '--output', metavar='<file>',
help='Write output to <file> instead of stdout.')
def main():
parsed_args = parser.parse_args()
image = Image.open(parsed_args.image)
size = (parsed_args.width, parsed_args.height)
new_image = crop_resize(image, size, parsed_args.force)
if parsed_args.display:
new_image.show()
elif parsed_args.output:
new_image.save(parsed_args.output)
else:
f = BytesIO()
new_image.save(f, image.format)
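        # sys.stdout.buffer is the underlying binary stream on Python 3;
        # Python 2's sys.stdout accepts bytes directly, hence the fallback.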
try:
stdout = sys.stdout.buffer
except AttributeError:
stdout = sys.stdout
stdout.write(f.getvalue())
| codeif/crimg | crimg/bin.py | Python | mit | 1,481 |
from __future__ import print_function
import os
import sys
import subprocess
try:
import pkg_resources
_has_pkg_resources = True
except:
_has_pkg_resources = False
try:
import svn.local
_has_svn_local = True
except:
_has_svn_local = False
def test_helper():
return "test helper text"
def dict_to_str(d):
"""
Given a dictionary d, return a string with
each entry in the form 'key: value' and entries
separated by newlines.
"""
vals = []
for k in d.keys():
vals.append('{}: {}'.format(k, d[k]))
v = '\n'.join(vals)
return v
def module_version(module, label=None):
"""
Helper function for getting the module ("module") in the current
namespace and their versions.
The optional argument 'label' allows you to set the
string used as the dictionary key in the returned dictionary.
By default the key is '[module] version'.
"""
if not _has_pkg_resources:
return {}
version = pkg_resources.get_distribution(module).version
if label:
k = '{}'.format(label)
else:
k = '{} version'.format(module)
return {k: '{}'.format(version)}
def file_contents(filename, label=None):
"""
Helper function for getting the contents of a file,
provided the filename.
Returns a dictionary keyed (by default) with the filename
where the value is a string containing the contents of the file.
The optional argument 'label' allows you to set the
string used as the dictionary key in the returned dictionary.
"""
if not os.path.isfile(filename):
print('ERROR: {} NOT FOUND.'.format(filename))
return {}
else:
fin = open(filename, 'r')
contents = ''
for l in fin:
contents += l
if label:
d = {'{}'.format(label): contents}
else:
d = {filename: contents}
return d
def svn_information(svndir=None, label=None):
"""
Helper function for obtaining the SVN repository
information for the current directory (default)
or the directory supplied in the svndir argument.
Returns a dictionary keyed (by default) as 'SVN INFO'
where the value is a string containing essentially what
is returned by 'svn info'.
The optional argument 'label' allows you to set the
string used as the dictionary key in the returned dictionary.
"""
if not _has_svn_local:
print('SVN information unavailable.')
print('You do not have the "svn" package installed.')
print('Install "svn" from pip using "pip install svn"')
return {}
if svndir:
repo = svn.local.LocalClient(svndir)
else:
repo = svn.local.LocalClient(os.getcwd())
try:
# Get a dictionary of the SVN repository information
info = repo.info()
except:
print('ERROR: WORKING DIRECTORY NOT AN SVN REPOSITORY.')
return {}
v = dict_to_str(info)
if label:
k = '{}'.format(label)
else:
k = 'SVN INFO'
return {k: v}
def get_git_hash(gitpath=None, label=None):
"""
Helper function for obtaining the git repository hash.
for the current directory (default)
or the directory supplied in the gitpath argument.
Returns a dictionary keyed (by default) as 'GIT HASH'
where the value is a string containing essentially what
is returned by subprocess.
The optional argument 'label' allows you to set the string
used as the dictionary key in the returned dictionary.
"""
if gitpath:
thisdir = os.getcwd()
os.chdir(gitpath)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR: WORKING DIRECTORY NOT A GIT REPOSITORY")
return {}
if label:
l = '{}'.format(label)
else:
l = 'GIT HASH'
return {l:sha}
def get_source_code(scode,sourcepath=None, label=None):
"""
Helper function for obtaining the source code.
for the current directory (default) or the directory
supplied in the sourcepath argument.
Returns a dictionary keyed (by default) as 'source code'
where the value is a string containing the source code.
The optional argument 'label' allows you to set the string
used as the dictionary key in the returned dictionary.
"""
if sourcepath:
os.chdir(sourcepath)
if not os.path.isfile(scode):
print('ERROR: {} NOT FOUND.'.format(scode))
return {}
else:
with open(scode,'r') as f:
s = f.read()
if label:
n = {'{}'.format(label):s}
else:
n = {'source code':s}
return n
| MetaPlot/MetaPlot | metaplot/helpers.py | Python | mit | 4,900 |
from django.db import models
from .workflow import TestStateMachine
class TestModel(models.Model):
name = models.CharField(max_length=100)
state = models.CharField(max_length=20, null=True, blank=True)
state_num = models.IntegerField(null=True, blank=True)
other_state = models.CharField(max_length=20, null=True, blank=True)
message = models.CharField(max_length=250, null=True, blank=True)
class Meta:
permissions = TestStateMachine.get_permissions('testmodel', 'Test')
| andrewebdev/django-ostinato | ostinato/tests/statemachine/models.py | Python | mit | 509 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('geokey_sapelli', '0005_sapellifield_truefalse'),
]
operations = [
migrations.AddField(
model_name='sapelliproject',
name='sapelli_fingerprint',
field=models.IntegerField(default=-1),
preserve_default=False,
),
]
| ExCiteS/geokey-sapelli | geokey_sapelli/migrations/0006_sapelliproject_sapelli_fingerprint.py | Python | mit | 468 |
from __future__ import division, print_function #, unicode_literals
"""
Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples
of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
import numpy as np
# Setup.
num_max = 1000
basis = [3, 5]
factors = []
for i in range(num_max):
for k in basis:
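        # i % k == 0 means i is a multiple of k; record i once and move on.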
if not i % k:
factors.append(i)
break
print('\nRange: {:d}'.format(num_max))
print('Number of factors: {:d}'.format(len(factors)))
print('The answer: {:d}'.format(np.sum(factors)))
# Done.
| Who8MyLunch/euler | problem_001.py | Python | mit | 632 |
s="the quick brown fox jumped over the lazy dog"
t = s.split(" ")
for v in t:
print(v)
r = s.split("e")
for v in r:
print(v)
x = s.split()
for v in x:
print(v)
# 2-arg version of split not supported
# y = s.split(" ",7)
# for v in y:
# print v
| naitoh/py2rb | tests/strings/split.py | Python | mit | 266 |
import torch
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class NegativeMomentumTrainHook(BaseTrainHook):
def __init__(self, gan=None, config=None, trainer=None):
super().__init__(config=config, gan=gan, trainer=trainer)
self.d_grads = None
self.g_grads = None
def gradients(self, d_grads, g_grads):
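        # Negative momentum: return the current gradients minus a
        # gamma-weighted copy of the previous step's gradients, and keep the
        # unmodified current gradients for the next call.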
if self.d_grads is None:
self.d_grads = [torch.zeros_like(_g) for _g in d_grads]
self.g_grads = [torch.zeros_like(_g) for _g in g_grads]
new_d_grads = [g.clone() for g in d_grads]
new_g_grads = [g.clone() for g in g_grads]
d_grads = [_g - self.config.gamma * _g2 for _g, _g2 in zip(d_grads, self.d_grads)]
g_grads = [_g - self.config.gamma * _g2 for _g, _g2 in zip(g_grads, self.g_grads)]
self.d_grads = new_d_grads
self.g_grads = new_g_grads
return [d_grads, g_grads]
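# Minimal standalone sketch (informal, not part of HyperGAN): it replays the update the
# hook applies, adjusted = grad - gamma * previous_grad, on toy tensors. The gamma value
# and the fake gradients below are invented for illustration.
if __name__ == "__main__":
    gamma = 0.5
    prev = [torch.zeros(3)]
    for step in range(3):
        grads = [torch.ones(3) * (step + 1)]                     # pretend gradients
        adjusted = [g - gamma * p for g, p in zip(grads, prev)]  # negative-momentum step
        prev = [g.clone() for g in grads]                        # keep raw grads for next step
        print(step, adjusted[0])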
| 255BITS/HyperGAN | hypergan/train_hooks/negative_momentum_train_hook.py | Python | mit | 887 |
import numpy as np
__author__ = 'David John Gagne <[email protected]>'
def main():
# Contingency Table from Wilks (2011) Table 8.3
table = np.array([[50, 91, 71],
[47, 2364, 170],
[54, 205, 3288]])
mct = MulticlassContingencyTable(table, n_classes=table.shape[0],
class_names=np.arange(table.shape[0]).astype(str))
print(mct.peirce_skill_score())
print(mct.gerrity_score())
class MulticlassContingencyTable(object):
"""
This class is a container for a contingency table containing more than 2 classes.
The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories,
and the columns corresponding to observation categories.
"""
def __init__(self, table=None, n_classes=2, class_names=("1", "0")):
self.table = table
self.n_classes = n_classes
self.class_names = class_names
if table is None:
self.table = np.zeros((self.n_classes, self.n_classes), dtype=int)
def __add__(self, other):
assert self.n_classes == other.n_classes, "Number of classes does not match"
return MulticlassContingencyTable(self.table + other.table,
n_classes=self.n_classes,
class_names=self.class_names)
def peirce_skill_score(self):
"""
Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score)
"""
n = float(self.table.sum())
nf = self.table.sum(axis=1)
no = self.table.sum(axis=0)
correct = float(self.table.trace())
return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2)
def gerrity_score(self):
"""
Gerrity Score, which weights each cell in the contingency table by its observed relative frequency.
:return:
"""
k = self.table.shape[0]
n = float(self.table.sum())
p_o = self.table.sum(axis=0) / n
p_sum = np.cumsum(p_o)[:-1]
a = (1.0 - p_sum) / p_sum
s = np.zeros(self.table.shape, dtype=float)
for (i, j) in np.ndindex(*s.shape):
if i == j:
s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1]))
elif i < j:
s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1]))
else:
s[i, j] = s[j, i]
return np.sum(self.table / float(self.table.sum()) * s)
def heidke_skill_score(self):
n = float(self.table.sum())
nf = self.table.sum(axis=1)
no = self.table.sum(axis=0)
correct = float(self.table.trace())
return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2)
if __name__ == "__main__":
main()
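# Informal usage sketch (not from the original module): a table can also be filled
# incrementally from (forecast, observation) class indices; the pairs below are toy data.
def _example_usage():
    mct = MulticlassContingencyTable(n_classes=3, class_names=("low", "mid", "high"))
    for f, o in [(0, 0), (0, 0), (1, 1), (2, 2), (1, 0), (2, 1)]:
        mct.table[f, o] += 1  # rows: forecast class, columns: observed class
    return mct.peirce_skill_score(), mct.heidke_skill_score()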
| djgagne/hagelslag | hagelslag/evaluation/MulticlassContingencyTable.py | Python | mit | 2,908 |
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group, Permission
from simple_history import register
from celsius.tools import register_for_permission_handling
register(User)
register(Group)
register_for_permission_handling(User)
register_for_permission_handling(Group)
register_for_permission_handling(Permission)
register_for_permission_handling(LogEntry)
| cytex124/celsius-cloud-backend | src/addons/management_user/admin.py | Python | mit | 408 |
from django import forms
from miniURL.models import Redirection
# To build a form from a model. (/!\ note the different inheritance)
class RedirectionForm(forms.ModelForm):
class Meta:
model = Redirection
fields = ('real_url', 'pseudo')
# To populate the form with data, this can be done with a POST
# or directly by passing a model object:
# form = ArticleForm(instance=article)  # article being any Article object from the database
# The fields are then pre-filled.
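# Rough view-side sketch tying the notes above together (informal, not part of this
# app); it assumes the usual Django request cycle, and the template name is made up.
#
#   def new_redirection(request):
#       form = RedirectionForm(request.POST or None)
#       if form.is_valid():
#           form.save()  # writes a Redirection row
#       return render(request, 'miniURL/new.html', {'form': form})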
# Once a valid ModelForm has been received, calling save() is enough to store it in the database.
| guillaume-havard/testdjango | sitetest/miniURL/forms.py | Python | mit | 615 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 21:11:45 2017
@author: hubert
"""
import numpy as np
import matplotlib.pyplot as plt
class LiveBarGraph(object):
"""
"""
def __init__(self, band_names=['delta', 'theta', 'alpha', 'beta'],
ch_names=['TP9', 'AF7', 'AF8', 'TP10']):
"""
"""
self.band_names = band_names
self.ch_names = ch_names
        # one bar per (band, channel) combination
        self.n_bars = len(self.band_names) * len(self.ch_names)
self.fig, self.ax = plt.subplots()
self.ax.set_ylim((0, 1))
y = np.zeros((self.n_bars,))
x = range(self.n_bars)
self.rects = self.ax.bar(x, y)
def update(self, new_y):
[rect.set_height(y) for rect, y in zip(self.rects, new_y)]
if __name__ == '__main__':
bar = LiveBarGraph()
    plt.show(block=False)  # non-blocking so the update loop below keeps running
while True:
        bar.update(np.random.random(bar.n_bars))
plt.pause(0.1)
| bcimontreal/bci_workshop | python/extra_stuff/livebargraph.py | Python | mit | 940 |
# -*- coding: utf-8 -*-
from modules import Robot
import time
r = Robot.Robot()
state = [0, 1000, 1500]
(run, move, write) = range(3)
i = run
slowdown = 1
flag_A = 0
flag_C = 0
lock = [0, 0, 0, 0]
while(True):
a = r.Read()
for it in range(len(lock)):
if lock[it]:
lock[it] = lock[it] - 1
    if a[0]:  # continuous control pad
flag_A = 0
flag_C = 0
if a[0] == 1 or a[0] == 5 or a[0] == 6:
r.A.run_forever(r.S/slowdown)
elif a[0] == 2 or a[0] == 7 or a[0] == 8:
r.A.run_forever(-r.S/slowdown)
else:
r.A.stop()
if a[0] == 3 or a[0] == 5 or a[0] == 7:
r.C.run_forever(r.S/slowdown)
elif a[0] == 4 or a[0] == 6 or a[0] == 8:
r.C.run_forever(-r.S/slowdown)
else:
r.C.stop()
    elif a[1] and not lock[1]:  # left control pad: discrete
        if a[1] == 1 and i is not run:  # right control pad: continuous
r.changestate(state[i]-state[i-1])
i = i-1
time.sleep(0.5) # (state[i]-state[i-1])/r.S
if i is run:
slowdown = 1
elif a[1] == 2 and i is not write:
r.changestate(state[i]-state[i+1])
i = i+1
slowdown = 5
time.sleep(0.5) # (state[i+1]-state[i])/r.S
elif a[1] == 3:
r.B.run_forever(r.S)
elif a[1] == 4:
r.B.run_forever(-r.S)
elif a[1] == 9:
r.B.stop()
else:
pass
    elif a[2]:  # one-click control pad
        if a[2] == 1 or a[2] == 5 or a[2] == 6:  # stop on 9 (beacon)
if flag_A == -1:
r.A.stop()
flag_A = 0
lock[0] = 30 # lock = 30
elif not lock[0]:
r.A.run_forever(r.S/slowdown)
flag_A = 1
elif a[2] == 2 or a[2] == 7 or a[2] == 8:
if flag_A == 1:
r.A.stop()
flag_A = 0
lock[1] = 30 # lock = 30
elif not lock[1]:
r.A.run_forever(-r.S/slowdown)
flag_A = -1
if a[2] == 3 or a[2] == 5 or a[2] == 7:
if flag_C == -1:
r.C.stop()
flag_C = 0
lock[2] = 30 # lock = 30
elif not lock[2]:
r.C.run_forever(r.S/slowdown)
flag_C = 1
elif a[2] == 4 or a[2] == 6 or a[2] == 8:
if flag_C == 1:
                r.C.stop()
flag_C = 0
lock[3] = 30 # lock = 30
elif not lock[3]:
r.C.run_forever(-r.S/slowdown)
flag_C = -1
if a[2] == 9:
r.stop()
flag_A = 0
flag_C = 0
    elif a[3]:  # alternative one-click pad
        if a[3] == 1:  # button 1 - both motors
if flag_A == -1 and flag_C == -1:
r.stop()
flag_A = 0
flag_C = 0
lock[0] = 30 # lock = 30
elif not lock[0]:
r.run(r.S/slowdown, r.S/slowdown)
flag_A = 1
flag_C = 1
elif a[3] == 2:
if flag_A == 1 and flag_C == 1:
r.stop()
flag_A = 0
flag_C = 0
lock[1] = 30 # lock = 30
elif not lock[1]:
r.run(-r.S/slowdown, -r.S/slowdown)
flag_A = -1
flag_C = -1
elif a[3] == 3:
if flag_A == 1 and flag_C == -1:
r.stop()
flag_A = 0
flag_C = 0
lock[2] = 30 # lock = 30
elif not lock[2]:
r.run(-r.S/slowdown, r.S/slowdown)
flag_A = -1
flag_C = 1
elif a[3] == 4:
if flag_A == -1 and flag_C == 1:
r.stop()
flag_A = 0
flag_C = 0
lock[3] = 30 # lock = 30
elif not lock[3]:
r.run(r.S/slowdown, -r.S/slowdown)
flag_A = 1
flag_C = -1
elif a[3] == 9:
r.stop()
flag_A = 0
flag_C = 0
else:
if not flag_A:
r.A.stop()
if not flag_C:
r.C.stop()
| KMPSUJ/lego_robot | pilot.py | Python | mit | 4,781 |
# -*- coding: utf-8 -*-
from django.db import models
from Corretor.base import CorretorException
from Corretor.base import ExecutorException
from Corretor.base import CompiladorException
from Corretor.base import ComparadorException
from Corretor.base import LockException
from model_utils import Choices
class RetornoCorrecao(models.Model):
"""Um modelo que possui informacoes sobre o retorno da correcao de uma questao(ou questao de avaliacao).
"""
TIPOS = Choices(
(0,'loading',u'Loading'),
(1,'compilacao',u'Compilação'),
(2,'execucao',u'Execução'),
(3,'comparacao',u'Comparação'),
(4,'lock',u'Lock'),
(5,'correto',u'Correto'),
)
tipo = models.SmallIntegerField(u"Tipo",choices=TIPOS, default=TIPOS.loading)
msg = models.TextField(u"Mensagem",blank=True,null=True)
task_id = models.CharField(max_length=350,blank=True,null=True)
class Meta:
verbose_name = u'Retorno Correção'
app_label = 'Corretor'
def __unicode__(self):
return "%s: %s" %(self.TIPOS[self.tipo][1],self.msg)
def altera_dados(self,sucesso=True,erroException=None):
"""
        Update this result object with the error details, or with the success
        message when the correction ran successfully.
"""
tipo = RetornoCorrecao.TIPOS.correto
correcao_msg = "Correto!"
# print ">>altera_dados"
# print ">>isinstance(erroException,CorretorException)",isinstance(erroException,CorretorException)
if sucesso == True:
# print ">>retorno.successful()"
tipo = RetornoCorrecao.TIPOS.correto
correcao_msg = "Correto!"
elif isinstance(erroException,CorretorException):
# print "erro: %s" % erroException.message
if isinstance(erroException,ExecutorException):
correcao_msg = erroException.message
tipo = RetornoCorrecao.TIPOS.execucao
if isinstance(erroException,CompiladorException):
correcao_msg = erroException.message
tipo = RetornoCorrecao.TIPOS.compilacao
if isinstance(erroException,ComparadorException):
correcao_msg = erroException.message
tipo = RetornoCorrecao.TIPOS.comparacao
if isinstance(erroException,LockException):
correcao_msg = erroException.message
tipo = RetornoCorrecao.TIPOS.lock
self.tipo = tipo
self.msg = correcao_msg
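# Informal usage sketch (not part of the original app): a correction task would
# typically record its outcome roughly like this, where `exc` is the
# CorretorException subclass raised by the run, or None on success:
#
#   retorno = RetornoCorrecao.objects.create(task_id=task.id)
#   retorno.altera_dados(sucesso=(exc is None), erroException=exc)
#   retorno.save()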
| arruda/amao | AMAO/apps/Corretor/models/retorno.py | Python | mit | 2,633 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
"""An Ssl predefined policy.
:param id: Resource ID.
:type id: str
:param name: Name of Ssl predefined policy.
:type name: str
:param cipher_suites: Ssl cipher suites to be enabled in the specified
order for application gateway.
:type cipher_suites: list[str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslCipherSuite]
:param min_protocol_version: Minimum version of Ssl protocol to be
supported on application gateway. Possible values include: 'TLSv1_0',
'TLSv1_1', 'TLSv1_2'
:type min_protocol_version: str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslProtocol
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.cipher_suites = kwargs.get('cipher_suites', None)
self.min_protocol_version = kwargs.get('min_protocol_version', None)
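# Informal instantiation sketch (not part of the generated SDK): the keyword names mirror
# the attributes documented above; the policy name and cipher suite are illustrative values.
#
#   policy = ApplicationGatewaySslPredefinedPolicy(
#       name='AppGwSslPolicy20170401',
#       cipher_suites=['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'],
#       min_protocol_version='TLSv1_1')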
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_ssl_predefined_policy.py | Python | mit | 1,826 |
from util.tipo import tipo
class S_PARTY_MEMBER_INTERVAL_POS_UPDATE(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
| jeff-alves/Tera | game/message/unused/S_PARTY_MEMBER_INTERVAL_POS_UPDATE.py | Python | mit | 246 |
"""Auto-generated file, do not edit by hand. BG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BG = PhoneMetadata(id='BG', country_code=359, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[23567]\\d{5,7}|[489]\\d{6,8}', possible_number_pattern='\\d{5,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:[0-8]\\d{5,6}|9\\d{4,6})|(?:[36]\\d|5[1-9]|8[1-6]|9[1-7])\\d{5,6}|(?:4(?:[124-7]\\d|3[1-6])|7(?:0[1-9]|[1-9]\\d))\\d{4,5}', possible_number_pattern='\\d{5,8}', example_number='2123456'),
mobile=PhoneNumberDesc(national_number_pattern='(?:8[7-9]|98)\\d{7}|4(?:3[0789]|8\\d)\\d{5}', possible_number_pattern='\\d{8,9}', example_number='48123456'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{5}', possible_number_pattern='\\d{8}', example_number='80012345'),
premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='700\\d{5}', possible_number_pattern='\\d{5,9}', example_number='70012345'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|50|6[06])', possible_number_pattern='\\d{3}', example_number='112'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(2)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['29'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(2)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{2})', format='\\1 \\2 \\3', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[78]00'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['[356]|4[124-7]|7[1-9]|8[1-6]|9[1-7]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['48|8[7-9]|9[08]'], national_prefix_formatting_rule='0\\1')])
| ayushgoel/FixGoogleContacts | phonenumbers/data/region_BG.py | Python | mit | 3,204 |
from itertools import product
import numpy as np
from sympy import And
import pytest
from conftest import skipif, opts_tiling
from devito import (ConditionalDimension, Grid, Function, TimeFunction, SparseFunction, # noqa
Eq, Operator, Constant, Dimension, SubDimension, switchconfig,
SubDomain, Lt, Le, Gt, Ge, Ne, Buffer)
from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes,
retrieve_iteration_tree)
from devito.symbolics import indexify, retrieve_functions, IntDiv
from devito.types import Array
class TestBufferedDimension(object):
def test_multi_buffer(self):
grid = Grid((3, 3))
f = TimeFunction(name="f", grid=grid)
g = TimeFunction(name="g", grid=grid, save=Buffer(7))
op = Operator([Eq(f.forward, 1), Eq(g, f.forward)])
op(time_M=3)
# f looped all time_order buffer and is 1 everywhere
assert np.allclose(f.data, 1)
# g looped indices 0 to 3, rest is still 0
assert np.allclose(g.data[0:4], 1)
assert np.allclose(g.data[4:], 0)
def test_multi_buffer_long_time(self):
grid = Grid((3, 3))
time = grid.time_dim
f = TimeFunction(name="f", grid=grid)
g = TimeFunction(name="g", grid=grid, save=Buffer(7))
op = Operator([Eq(f.forward, time), Eq(g, time+1)])
op(time_M=20)
# f[0] is time=19, f[1] is time=20
assert np.allclose(f.data[0], 19)
assert np.allclose(f.data[1], 20)
# g is time 15 to 21 (loop twice the 7 buffer then 15->21)
for i in range(7):
assert np.allclose(g.data[i], 14+i+1)
class TestSubDimension(object):
@pytest.mark.parametrize('opt', opts_tiling)
def test_interior(self, opt):
"""
Tests application of an Operator consisting of a single equation
over the ``interior`` subdomain.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
interior = grid.interior
u = TimeFunction(name='u', grid=grid)
eqn = [Eq(u.forward, u + 2, subdomain=interior)]
op = Operator(eqn, opt=opt)
op.apply(time_M=2)
assert np.all(u.data[1, 1:-1, 1:-1, 1:-1] == 6.)
assert np.all(u.data[1, :, 0] == 0.)
assert np.all(u.data[1, :, -1] == 0.)
assert np.all(u.data[1, :, :, 0] == 0.)
assert np.all(u.data[1, :, :, -1] == 0.)
def test_domain_vs_interior(self):
"""
Tests application of an Operator consisting of two equations, one
over the whole domain (default), and one over the ``interior`` subdomain.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
t = grid.stepping_dim # noqa
interior = grid.interior
u = TimeFunction(name='u', grid=grid) # noqa
eqs = [Eq(u.forward, u + 1),
Eq(u.forward, u.forward + 2, subdomain=interior)]
op = Operator(eqs, opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 2
op.apply(time_M=1)
assert np.all(u.data[1, 0, :, :] == 1)
assert np.all(u.data[1, -1, :, :] == 1)
assert np.all(u.data[1, :, 0, :] == 1)
assert np.all(u.data[1, :, -1, :] == 1)
assert np.all(u.data[1, :, :, 0] == 1)
assert np.all(u.data[1, :, :, -1] == 1)
assert np.all(u.data[1, 1:3, 1:3, 1:3] == 3)
@pytest.mark.parametrize('opt', opts_tiling)
def test_subdim_middle(self, opt):
"""
Tests that instantiating SubDimensions using the classmethod
constructors works correctly.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
t = grid.stepping_dim # noqa
u = TimeFunction(name='u', grid=grid) # noqa
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=1,
thickness_right=1)
eqs = [Eq(u.forward, u + 1)]
eqs = [e.subs(x, xi) for e in eqs]
op = Operator(eqs, opt=opt)
u.data[:] = 1.0
op.apply(time_M=1)
assert np.all(u.data[1, 0, :, :] == 1)
assert np.all(u.data[1, -1, :, :] == 1)
assert np.all(u.data[1, 1:3, :, :] == 2)
def test_symbolic_size(self):
"""Check the symbolic size of all possible SubDimensions is as expected."""
grid = Grid(shape=(4,))
x, = grid.dimensions
thickness = 4
xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
assert xleft.symbolic_size == xleft.thickness.left[0]
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
assert xi.symbolic_size == (x.symbolic_max - x.symbolic_min -
xi.thickness.left[0] - xi.thickness.right[0] + 1)
xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
assert xright.symbolic_size == xright.thickness.right[0]
@pytest.mark.parametrize('opt', opts_tiling)
def test_bcs(self, opt):
"""
Tests application of an Operator consisting of multiple equations
defined over different sub-regions, explicitly created through the
use of SubDimensions.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
t_in_centre = Eq(u[t+1, xi, yi], 1)
leftbc = Eq(u[t+1, xleft, yi], u[t+1, xleft+1, yi] + 1)
rightbc = Eq(u[t+1, xright, yi], u[t+1, xright-1, yi] + 1)
op = Operator([t_in_centre, leftbc, rightbc], opt=opt)
op.apply(time_m=1, time_M=1)
assert np.all(u.data[0, :, 0:thickness] == 0.)
assert np.all(u.data[0, :, -thickness:] == 0.)
assert all(np.all(u.data[0, i, thickness:-thickness] == (thickness+1-i))
for i in range(thickness))
assert all(np.all(u.data[0, -i, thickness:-thickness] == (thickness+2-i))
for i in range(1, thickness + 1))
assert np.all(u.data[0, thickness:-thickness, thickness:-thickness] == 1.)
def test_flow_detection_interior(self):
"""
Test detection of flow directions when SubDimensions are used
(in this test they are induced by the ``interior`` subdomain).
Stencil uses values at new timestep as well as those at previous ones
This forces an evaluation order onto x.
Weights are:
x=0 x=1 x=2 x=3
t=N 2 ---3
v /
t=N+1 o--+----4
Flow dependency should traverse x in the negative direction
x=2 x=3 x=4 x=5 x=6
t=0 0 --- 0 -- 1 -- 0
v / v / v /
t=1 44 -+--- 11 -+--- 2--+ -- 0
"""
grid = Grid(shape=(10, 10))
x, y = grid.dimensions
interior = grid.interior
u = TimeFunction(name='u', grid=grid, save=10, time_order=1, space_order=0)
step = Eq(u.forward, 2*u
+ 3*u.subs(x, x+x.spacing)
+ 4*u.forward.subs(x, x+x.spacing),
subdomain=interior)
op = Operator(step)
u.data[0, 5, 5] = 1.0
op.apply(time_M=0)
assert u.data[1, 5, 5] == 2
assert u.data[1, 4, 5] == 11
assert u.data[1, 3, 5] == 44
assert u.data[1, 2, 5] == 4*44
assert u.data[1, 1, 5] == 4*4*44
# This point isn't updated because of the `interior` selection
assert u.data[1, 0, 5] == 0
assert np.all(u.data[1, 6:, :] == 0)
assert np.all(u.data[1, :, 0:5] == 0)
assert np.all(u.data[1, :, 6:] == 0)
@pytest.mark.parametrize('exprs,expected,', [
# Carried dependence in both /t/ and /x/
(['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y])'], 'y'),
(['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y], subdomain=interior)'], 'i0y'),
# Carried dependence in both /t/ and /y/
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y])'], 'x'),
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)'], 'i0x'),
# Carried dependence in /y/, leading to separate /y/ loops, one
# going forward, the other backward
(['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)',
'Eq(u[t+1, x, y], u[t+1, x, y+1] + u[t, x, y], subdomain=interior)'], 'i0x'),
])
def test_iteration_property_parallel(self, exprs, expected):
"""Tests detection of sequental and parallel Iterations when applying
equations over different subdomains."""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions # noqa
t = grid.time_dim # noqa
interior = grid.interior # noqa
u = TimeFunction(name='u', grid=grid, save=10, time_order=1) # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs, opt='noop')
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Sequential for i in iterations if i.dim.name != expected)
assert all(i.is_Parallel for i in iterations if i.dim.name == expected)
@skipif(['device'])
@pytest.mark.parametrize('exprs,expected,', [
# All parallel, the innermost Iteration gets vectorized
(['Eq(u[time, x, yleft], u[time, x, yleft] + 1.)'], ['yleft']),
# All outers are parallel, carried dependence in `yleft`, so the middle
# Iteration over `x` gets vectorized
(['Eq(u[time, x, yleft], u[time, x, yleft+1] + 1.)'], ['x']),
# Only the middle Iteration is parallel, so no vectorization (the Iteration
# is left non-vectorised for OpenMP parallelism)
(['Eq(u[time+1, x, yleft], u[time, x, yleft+1] + u[time+1, x, yleft+1])'], [])
])
def test_iteration_property_vector(self, exprs, expected):
"""Tests detection of vector Iterations when using subdimensions."""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions # noqa
time = grid.time_dim # noqa
# The leftmost 10 elements
yleft = SubDimension.left(name='yleft', parent=y, thickness=10) # noqa
u = TimeFunction(name='u', grid=grid, save=10, time_order=0, space_order=1) # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs, opt='simd')
iterations = FindNodes(Iteration).visit(op)
vectorized = [i.dim.name for i in iterations if i.is_Vectorized]
assert set(vectorized) == set(expected)
@pytest.mark.parametrize('opt', opts_tiling)
def test_subdimmiddle_parallel(self, opt):
"""
Tests application of an Operator consisting of a subdimension
defined over different sub-regions, explicitly created through the
use of SubDimensions.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# a 5 point stencil that can be computed in parallel
centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t, xi-1, yi]
+ u[t, xi+1, yi] + u[t, xi, yi-1] + u[t, xi, yi+1])
u.data[0, 10, 10] = 1.0
op = Operator([centre], opt=opt)
print(op.ccode)
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xi, yi])
op.apply(time_m=0, time_M=0)
assert np.all(u.data[1, 9:12, 10] == 1.0)
assert np.all(u.data[1, 10, 9:12] == 1.0)
# Other than those, it should all be 0
u.data[1, 9:12, 10] = 0.0
u.data[1, 10, 9:12] = 0.0
assert np.all(u.data[1, :] == 0)
def test_subdimleft_parallel(self):
"""
Tests application of an Operator consisting of a subdimension
defined over different sub-regions, explicitly created through the
use of SubDimensions.
This tests that flow direction is not being automatically inferred
from whether the subdimension is on the left or right boundary.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xl = SubDimension.left(name='xl', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# Can be done in parallel
eq = Eq(u[t+1, xl, yi], u[t, xl, yi] + 1)
op = Operator([eq])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xl, yi])
op.apply(time_m=0, time_M=0)
assert np.all(u.data[1, 0:thickness, 0:thickness] == 0)
assert np.all(u.data[1, 0:thickness, -thickness:] == 0)
assert np.all(u.data[1, 0:thickness, thickness:-thickness] == 1)
assert np.all(u.data[1, thickness+1:, :] == 0)
def test_subdimmiddle_notparallel(self):
"""
Tests application of an Operator consisting of a subdimension
defined over different sub-regions, explicitly created through the
use of SubDimensions.
Different from ``test_subdimmiddle_parallel`` because an interior
dimension cannot be evaluated in parallel.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# flow dependencies in x and y which should force serial execution
# in reverse direction
centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t+1, xi+1, yi+1])
u.data[0, 10, 10] = 1.0
op = Operator([centre])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xi)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)
op.apply(time_m=0, time_M=0)
for i in range(4, 11):
assert u.data[1, i, i] == 1.0
u.data[1, i, i] = 0.0
assert np.all(u.data[1, :] == 0)
def test_subdimleft_notparallel(self):
"""
Tests application of an Operator consisting of a subdimension
defined over different sub-regions, explicitly created through the
use of SubDimensions.
This tests that flow direction is not being automatically inferred
from whether the subdimension is on the left or right boundary.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=0)
xl = SubDimension.left(name='xl', parent=x, thickness=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# Flows inward (i.e. forward) rather than outward
eq = Eq(u[t+1, xl, yi], u[t+1, xl-1, yi] + 1)
op = Operator([eq])
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xl)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)
op.apply(time_m=1, time_M=1)
assert all(np.all(u.data[0, :thickness, thickness+i] == [1, 2, 3, 4])
for i in range(12))
assert np.all(u.data[0, thickness:] == 0)
assert np.all(u.data[0, :, thickness+12:] == 0)
def test_subdim_fd(self):
"""
Test that the FD shortcuts are handled correctly with SubDimensions
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=1)
u.data[:] = 2.
        # Take FD derivatives on the interior subdomain
eq = [Eq(u.forward, u.dx + u.dy, subdomain=grid.interior)]
op = Operator(eq)
op.apply(time_M=0)
assert np.all(u.data[1, -1, :] == 2.)
assert np.all(u.data[1, :, 0] == 2.)
assert np.all(u.data[1, :, -1] == 2.)
assert np.all(u.data[1, 0, :] == 2.)
assert np.all(u.data[1, 1:18, 1:18] == 0.)
def test_arrays_defined_over_subdims(self):
"""
Check code generation when an Array uses a SubDimension.
"""
grid = Grid(shape=(3,))
x, = grid.dimensions
xi, = grid.interior.dimensions
f = Function(name='f', grid=grid)
a = Array(name='a', dimensions=(xi,), dtype=grid.dtype)
op = Operator([Eq(a[xi], 1), Eq(f, f + a[xi + 1], subdomain=grid.interior)],
openmp=False)
assert len(op.parameters) == 6
# neither `x_size` nor `xi_size` are expected here
assert not any(i.name in ('x_size', 'xi_size') for i in op.parameters)
# Try running it -- regardless of what it will produce, this should run
# ie, this checks this error isn't raised:
# "ValueError: No value found for parameter xi_size"
op()
@pytest.mark.parametrize('opt', opts_tiling)
def test_expandingbox_like(self, opt):
"""
Make sure SubDimensions aren't an obstacle to expanding boxes.
"""
grid = Grid(shape=(8, 8))
x, y = grid.dimensions
u = TimeFunction(name='u', grid=grid)
xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)
eqn = Eq(u.forward, u + 1)
eqn = eqn.subs({x: xi, y: yi})
op = Operator(eqn, opt=opt)
op.apply(time=3, x_m=2, x_M=5, y_m=2, y_M=5,
xi_ltkn=0, xi_rtkn=0, yi_ltkn=0, yi_rtkn=0)
assert np.all(u.data[0, 2:-2, 2:-2] == 4.)
assert np.all(u.data[1, 2:-2, 2:-2] == 3.)
assert np.all(u.data[:, :2] == 0.)
assert np.all(u.data[:, -2:] == 0.)
assert np.all(u.data[:, :, :2] == 0.)
assert np.all(u.data[:, :, -2:] == 0.)
class TestConditionalDimension(object):
"""
A collection of tests to check the correct functioning of ConditionalDimensions.
"""
def test_basic(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.), Eq(usave, u)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
def test_basic_shuffles(self):
"""
Like ``test_basic``, but with different equation orderings. Nevertheless,
we assert against the same exact values as in ``test_basic``, since we
save `u`, not `u.forward`.
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
# Shuffle 1
eqns = [Eq(usave, u), Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
# Shuffle 2
usave.data[:] = 0.
u.data[:] = 0.
u2.data[:] = 0.
eqns = [Eq(u.forward, u + 1.), Eq(usave, u), Eq(u2.forward, u2 + 1.)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor)
for i in range((nt+factor-1)//factor)])
@pytest.mark.parametrize('opt', opts_tiling)
def test_spacial_subsampling(self, opt):
"""
        Test ConditionalDimension on the spatial dimensions.
        This test saves u every two grid points:
u2[x, y] = u[2*x, 2*y]
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid, save=nt)
assert(grid.time_dim in u.indices)
        # Create subsampled spatial dimensions and a corresponding grid
dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
for d in u.grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
u2 = TimeFunction(name='u2', grid=grid2, save=nt)
assert(time in u2.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2, u)]
op = Operator(eqns, opt=opt)
op.apply(time_M=nt-2)
# Verify that u2[x,y]= u[2*x, 2*y]
assert np.allclose(u.data[:-1, 0::2, 0::2], u2.data[:-1, :, :])
def test_time_subsampling_fd(self):
nt = 19
grid = Grid(shape=(11, 11))
x, y = grid.dimensions
time = grid.time_dim
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled, time_order=2)
dx2 = [indexify(i) for i in retrieve_functions(usave.dt2.evaluate)]
assert dx2 == [usave[time_subsampled - 1, x, y],
usave[time_subsampled + 1, x, y],
usave[time_subsampled, x, y]]
def test_issue_1592(self):
grid = Grid(shape=(11, 11))
time = grid.time_dim
time_sub = ConditionalDimension('t_sub', parent=time, factor=2)
v = TimeFunction(name="v", grid=grid, space_order=4, time_dim=time_sub, save=5)
w = Function(name="w", grid=grid, space_order=4)
Operator(Eq(w, v.dx))(time=6)
op = Operator(Eq(v.forward, v.dx))
op.apply(time=6)
exprs = FindNodes(Expression).visit(op)
assert exprs[-1].expr.lhs.indices[0] == IntDiv(time, 2) + 1
def test_subsampled_fd(self):
"""
Test that the FD shortcuts are handled correctly with ConditionalDimensions
"""
grid = Grid(shape=(11, 11))
time = grid.time_dim
        # Create subsampled spatial dimensions and a corresponding grid
dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
for d in grid.dimensions])
grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
u2 = TimeFunction(name='u2', grid=grid2, space_order=2, time_order=1)
u2.data.fill(2.)
eqns = [Eq(u2.forward, u2.dx + u2.dy)]
op = Operator(eqns)
op.apply(time_M=0, x_M=11, y_M=11)
# Verify that u2 contains subsampled fd values
assert np.all(u2.data[0, :, :] == 2.)
assert np.all(u2.data[1, 0, 0] == 0.)
assert np.all(u2.data[1, -1, -1] == -20.)
assert np.all(u2.data[1, 0, -1] == -10.)
assert np.all(u2.data[1, -1, 0] == -10.)
assert np.all(u2.data[1, 1:-1, 0] == 0.)
assert np.all(u2.data[1, 0, 1:-1] == 0.)
assert np.all(u2.data[1, 1:-1, -1] == -10.)
assert np.all(u2.data[1, -1, 1:-1] == -10.)
assert np.all(u2.data[1, 1:4, 1:4] == 0.)
# This test generates an openmp loop form which makes older gccs upset
@switchconfig(openmp=False)
def test_nothing_in_negative(self):
"""Test the case where when the condition is false, there is nothing to do."""
nt = 4
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', save=nt, grid=grid)
assert(grid.time_dim in u.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(usave, u)]
op = Operator(eqns)
u.data[:] = 1.0
usave.data[:] = 0.0
op.apply(time_m=1, time_M=1)
assert np.allclose(usave.data, 0.0)
op.apply(time_m=0, time_M=0)
assert np.allclose(usave.data, 1.0)
def test_laplace(self):
grid = Grid(shape=(20, 20, 20))
x, y, z = grid.dimensions
time = grid.time_dim
t = grid.stepping_dim
tsave = ConditionalDimension(name='tsave', parent=time, factor=2)
u = TimeFunction(name='u', grid=grid, save=None, time_order=2)
usave = TimeFunction(name='usave', grid=grid, time_dim=tsave,
time_order=0, space_order=0)
steps = []
# save of snapshot
steps.append(Eq(usave, u))
# standard laplace-like thing
steps.append(Eq(u[t+1, x, y, z],
u[t, x, y, z] - u[t-1, x, y, z]
+ u[t, x-1, y, z] + u[t, x+1, y, z]
+ u[t, x, y-1, z] + u[t, x, y+1, z]
+ u[t, x, y, z-1] + u[t, x, y, z+1]))
op = Operator(steps)
u.data[:] = 0.0
u.data[0, 10, 10, 10] = 1.0
op.apply(time_m=0, time_M=0)
assert np.sum(u.data[0, :, :, :]) == 1.0
assert np.sum(u.data[1, :, :, :]) == 7.0
assert np.all(usave.data[0, :, :, :] == u.data[0, :, :, :])
def test_as_expr(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
Eq(usave, time_subsampled * u)]
op = Operator(eqns)
op.apply(t=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
assert np.all([np.allclose(usave.data[i], i*factor*i)
for i in range((nt+factor-1)//factor)])
def test_shifted(self):
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
u2 = TimeFunction(name='u2', grid=grid, save=nt)
assert(time in u2.indices)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
usave = TimeFunction(name='usave', grid=grid, save=2, time_dim=time_subsampled)
assert(time_subsampled in usave.indices)
t_sub_shift = Constant(name='t_sub_shift', dtype=np.int32)
eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
Eq(usave.subs(time_subsampled, time_subsampled - t_sub_shift), u)]
op = Operator(eqns)
# Starting at time_m=10, so time_subsampled - t_sub_shift is in range
op.apply(time_m=10, time_M=nt-2, t_sub_shift=3)
assert np.all(np.allclose(u.data[0], 8))
assert np.all([np.allclose(u2.data[i], i - 10) for i in range(10, nt)])
assert np.all([np.allclose(usave.data[i], 2+i*factor) for i in range(2)])
def test_no_index(self):
"""Test behaviour when the ConditionalDimension is used as a symbol in
an expression."""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid)
assert(grid.stepping_dim in u.indices)
v = Function(name='v', grid=grid)
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
eqns = [Eq(u.forward, u + 1), Eq(v, v + u*u*time_subsampled)]
op = Operator(eqns)
op.apply(t_M=nt-2)
assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        # expected result is 1600
# v = u[0]**2 * 0 + u[4]**2 * 1 + u[8]**2 * 2 + u[12]**2 * 3 + u[16]**2 * 4
# with u[t] = t
# v = 16 * 1 + 64 * 2 + 144 * 3 + 256 * 4 = 1600
assert np.all(np.allclose(v.data, 1600))
def test_no_index_sparse(self):
"""Test behaviour when the ConditionalDimension is used as a symbol in
an expression over sparse data objects."""
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, save=1)
f.data[:] = 0.
coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coordinates)
sf.data[:] = 1.
sd = sf.dimensions[sf._sparse_position]
# We want to write to `f` through `sf` so that we obtain the
# following 4x4 grid (the '*' show the position of the sparse points)
# We do that by emulating an injection
#
# 0 --- 0 --- 0 --- 0
# | * | | * |
# 0 --- 1 --- 1 --- 0
# | | | |
# 0 --- 1 --- 1 --- 0
# | * | | * |
# 0 --- 0 --- 0 --- 0
radius = 1
indices = [(i, i+radius) for i in sf._coordinate_indices]
bounds = [i.symbolic_size - radius for i in grid.dimensions]
eqs = []
for e, i in enumerate(product(*indices)):
args = [j > 0 for j in i]
args.extend([j < k for j, k in zip(i, bounds)])
condition = And(*args, evaluate=False)
cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition)
index = [time] + list(i)
eqs.append(Eq(f[index], f[index] + sf[cd]))
op = Operator(eqs)
op.apply(time=0)
assert np.all(f.data[0, 1:-1, 1:-1] == 1.)
assert np.all(f.data[0, 0] == 0.)
assert np.all(f.data[0, -1] == 0.)
assert np.all(f.data[0, :, 0] == 0.)
assert np.all(f.data[0, :, -1] == 0.)
def test_symbolic_factor(self):
"""
Test ConditionalDimension with symbolic factor (provided as a Constant).
"""
g = Grid(shape=(4, 4, 4))
u = TimeFunction(name='u', grid=g, time_order=0)
fact = Constant(name='fact', dtype=np.int32, value=4)
tsub = ConditionalDimension(name='tsub', parent=g.time_dim, factor=fact)
usave = TimeFunction(name='usave', grid=g, time_dim=tsub, save=4)
op = Operator([Eq(u, u + 1), Eq(usave, u)])
op.apply(time=7) # Use `fact`'s default value, 4
assert np.all(usave.data[0] == 1)
assert np.all(usave.data[1] == 5)
u.data[:] = 0.
op.apply(time=7, fact=2)
assert np.all(usave.data[0] == 1)
assert np.all(usave.data[1] == 3)
assert np.all(usave.data[2] == 5)
assert np.all(usave.data[3] == 7)
def test_implicit_dims(self):
"""
Test ConditionalDimension as an implicit dimension for an equation.
"""
# This test makes an Operator that should create a vector of increasing
# integers, but stop incrementing when a certain stop value is reached
shape = (50,)
stop_value = 20
time = Dimension(name='time')
f = TimeFunction(name='f', shape=shape, dimensions=[time])
# The condition to stop incrementing
cond = ConditionalDimension(name='cond',
parent=time, condition=f[time] < stop_value)
eqs = [Eq(f.forward, f), Eq(f.forward, f.forward + 1, implicit_dims=[cond])]
op = Operator(eqs)
op.apply(time_M=shape[0] - 2)
# Make the same calculation in python to assert the result
F = np.zeros(shape[0])
for i in range(shape[0]):
F[i] = i if i < stop_value else stop_value
assert np.all(f.data == F)
def test_grouping(self):
"""
Test that Clusters over the same set of ConditionalDimensions fall within
the same Conditional. This is a follow up to issue #1610.
"""
grid = Grid(shape=(10, 10))
time = grid.time_dim
cond = ConditionalDimension(name='cond', parent=time, condition=time < 5)
u = TimeFunction(name='u', grid=grid, space_order=4)
# We use a SubDomain only to keep the two Eqs separated
eqns = [Eq(u.forward, u + 1, subdomain=grid.interior),
Eq(u.forward, u.dx.dx + 1., implicit_dims=[cond])]
op = Operator(eqns, opt=('advanced-fsg', {'cire-mincost-sops': 1}))
conds = FindNodes(Conditional).visit(op)
assert len(conds) == 1
assert len(retrieve_iteration_tree(conds[0].then_body)) == 2
def test_stepping_dim_in_condition_lowering(self):
"""
Check that the compiler performs lowering on conditions
with TimeDimensions and generates the expected code::
if (g[t][x + 1][y + 1] <= 10){ if (g[t0][x + 1][y + 1] <= 10){
... --> ...
} }
        This test increments a function by one at every timestep while it is
        less-or-equal to 10 (g<=10), although the operator runs for 13 timesteps.
"""
grid = Grid(shape=(4, 4))
_, y = grid.dimensions
ths = 10
g = TimeFunction(name='g', grid=grid)
ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths))
op = Operator(Eq(g.forward, g + 1, implicit_dims=ci))
op.apply(time_M=ths+3)
assert np.all(g.data[0, :, :] == ths)
assert np.all(g.data[1, :, :] == ths + 1)
        assert ('if (g[t0][x + 1][y + 1] <= 10)\n'
                '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1') in str(op.ccode)
def test_expr_like_lowering(self):
"""
Test the lowering of an expr-like ConditionalDimension's condition.
This test makes an Operator that should indexify and lower the condition
passed in the Conditional Dimension
"""
grid = Grid(shape=(3, 3))
g1 = Function(name='g1', grid=grid)
g2 = Function(name='g2', grid=grid)
g1.data[:] = 0.49
g2.data[:] = 0.49
x, y = grid.dimensions
ci = ConditionalDimension(name='ci', parent=y, condition=Le((g1 + g2),
1.01*(g1 + g2)))
f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
Operator(Eq(f, g1+g2)).apply()
assert np.all(f.data[:] == g1.data[:] + g2.data[:])
@pytest.mark.parametrize('setup_rel, rhs, c1, c2, c3, c4', [
# Relation, RHS, c1 to c4 used as indexes in assert
(Lt, 3, 2, 4, 4, -1), (Le, 2, 2, 4, 4, -1), (Ge, 3, 4, 6, 1, 4),
(Gt, 2, 4, 6, 1, 4), (Ne, 5, 2, 6, 1, 2)
])
def test_relational_classes(self, setup_rel, rhs, c1, c2, c3, c4):
"""
Test ConditionalDimension using conditions based on Relations over SubDomains.
"""
class InnerDomain(SubDomain):
name = 'inner'
def define(self, dimensions):
return {d: ('middle', 2, 2) for d in dimensions}
inner_domain = InnerDomain()
grid = Grid(shape=(8, 8), subdomains=(inner_domain,))
g = Function(name='g', grid=grid)
g2 = Function(name='g2', grid=grid)
for i in [g, g2]:
i.data[:4, :4] = 1
i.data[4:, :4] = 2
i.data[4:, 4:] = 3
i.data[:4, 4:] = 4
xi, yi = grid.subdomains['inner'].dimensions
cond = setup_rel(0.25*g + 0.75*g2, rhs, subdomain=grid.subdomains['inner'])
ci = ConditionalDimension(name='ci', parent=yi, condition=cond)
f = Function(name='f', shape=grid.shape, dimensions=(xi, ci))
eq1 = Eq(f, 0.4*g + 0.6*g2)
eq2 = Eq(f, 5)
Operator([eq1, eq2]).apply()
assert np.all(f.data[2:6, c1:c2] == 5.)
assert np.all(f.data[:, c3:c4] < 5.)
def test_from_cond_to_param(self):
"""
Test that Functions appearing in the condition of a ConditionalDimension
but not explicitly in an Eq are actually part of the Operator input
(stems from issue #1298).
"""
grid = Grid(shape=(8, 8))
x, y = grid.dimensions
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
ci = ConditionalDimension(name='ci', parent=y, condition=Lt(g, 2 + h))
f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
for _ in range(5):
# issue #1298 was non deterministic
Operator(Eq(f, 5)).apply()
@skipif('device')
def test_no_fusion_simple(self):
"""
If ConditionalDimensions are present, then Clusters must not be fused so
that ultimately Eqs get scheduled to different loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
# No ConditionalDimensions yet. Will be fused and optimized
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1)]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 4
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
assert exprs[3].expr.rhs is exprs[0].output
# Now with a ConditionalDimension. No fusion, no optimization
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1, implicit_dims=[ctime])]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 1
@skipif('device')
def test_no_fusion_convoluted(self):
"""
Conceptually like `test_no_fusion_simple`, but with more expressions
and non-trivial data flow.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid)
g = Function(name='g', grid=grid)
h = Function(name='h', grid=grid)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(h, f + 1),
Eq(g, f + 1, implicit_dims=[ctime]),
Eq(f.forward, f + 1, implicit_dims=[ctime]),
Eq(f.forward, f + 1),
Eq(g, f + 1)]
op = Operator(eqns)
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 3
assert exprs[1].expr.rhs is exprs[0].output
assert exprs[2].expr.rhs is exprs[0].output
def test_affiness(self):
"""
Test for issue #1616.
"""
nt = 19
grid = Grid(shape=(11, 11))
time = grid.time_dim
factor = 4
time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
u = TimeFunction(name='u', grid=grid)
usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
time_dim=time_subsampled)
eqns = [Eq(u.forward, u + 1.), Eq(usave, u)]
op = Operator(eqns)
iterations = [i for i in FindNodes(Iteration).visit(op) if i.dim is not time]
assert all(i.is_Affine for i in iterations)
class TestMashup(object):
"""
Check the correct functioning of the compiler in presence of many Dimension types.
"""
def test_topofusion_w_subdims_conddims(self):
"""
Check that topological fusion works across guarded Clusters over different
iteration spaces and in presence of anti-dependences.
This test uses both SubDimensions (via SubDomains) and ConditionalDimensions.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1),
Eq(g.forward, g + 1),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 2
assert exprs[0].write is f
assert exprs[1].write is g
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
assert exprs[1].write is fsave
assert exprs[2].write is gsave
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 1
assert exprs[0].write is h
def test_topofusion_w_subdims_conddims_v2(self):
"""
Like `test_topofusion_w_subdims_conddims` but with more SubDomains,
so we expect fewer loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
Eq(g.forward, g + 1, subdomain=grid.interior),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 2
assert len(FindNodes(Expression).visit(op._func_table['bf0'].root)) == 3
assert len(FindNodes(Expression).visit(op._func_table['bf1'].root)) == 2 + 1 # r0
def test_topofusion_w_subdims_conddims_v3(self):
"""
Like `test_topofusion_w_subdims_conddims_v2` but with an extra anti-dependence,
which causes scheduling over more loop nests.
"""
grid = Grid(shape=(4, 4, 4))
time = grid.time_dim
f = TimeFunction(name='f', grid=grid, time_order=2)
g = TimeFunction(name='g', grid=grid, time_order=2)
h = TimeFunction(name='h', grid=grid, time_order=2)
fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
Eq(g.forward, g + 1, subdomain=grid.interior),
Eq(fsave, f.dt2, implicit_dims=[ctime]),
Eq(h, f.dt2.dx + g, subdomain=grid.interior),
Eq(gsave, g.dt2, implicit_dims=[ctime])]
op = Operator(eqns)
# Check generated code -- expect the gsave equation to be scheduled together
# in the same loop nest with the fsave equation
assert len(op._func_table) == 3
exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
assert len(exprs) == 2
assert exprs[0].write is f
assert exprs[1].write is g
exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
assert len(exprs) == 3
assert exprs[1].write is fsave
assert exprs[2].write is gsave
exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
assert len(exprs) == 2
assert exprs[1].write is h
| opesci/devito | tests/test_dimension.py | Python | mit | 47,982 |
"""Kytos SDN Platform."""
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| kytos/kytos-utils | kytos/__init__.py | Python | mit | 102 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pigame.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| MoonCheesez/stack | PiGame/pigame/manage.py | Python | mit | 804 |
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{large_company}}",
)
large_companies = (
"AZAL",
"Azergold",
"SOCAR",
"Socar Polymer",
"Global Export Fruits",
"Baku Steel Company",
"Azersun",
"Sun Food",
"Azərbaycan Şəkər İstehsalat Birliyi",
"Azərsu",
"Xəzər Dəniz Gəmiçiliyi",
"Azərenerji",
"Bakıelektrikşəbəkə",
"Azəralüminium",
"Bravo",
"Azərpambıq Aqrar Sənaye Kompleksi",
"CTS-Agro",
"Azərtütün Aqrar Sənaye Kompleksi",
"Azəripək",
"Azfruittrade",
"AF Holding",
"Azinko Holding",
"Gilan Holding",
"Azpetrol",
"Azərtexnolayn",
"Bakı Gəmiqayırma Zavodu",
"Gəncə Tekstil Fabriki",
"Mətanət A",
"İrşad Electronics",
)
company_suffixes = (
"ASC",
"QSC",
"MMC",
)
def large_company(self):
"""
:example: 'SOCAR'
"""
return self.random_element(self.large_companies)
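# Informal usage sketch (not part of the provider itself): with a Faker instance
# localised to az_AZ, the formats above drive company(); the method names are the
# ones defined in this module.
#
#   from faker import Faker
#   fake = Faker('az_AZ')
#   fake.company()        # e.g. 'SOCAR' or a '<last name> MMC' style name
#   fake.large_company()  # one of the entries in large_companies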
| joke2k/faker | faker/providers/company/az_AZ/__init__.py | Python | mit | 1,274 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles backports of the standard library's `fractions.py`.
The fractions module in 2.6 does not handle being instantiated using a
float and then calculating an approximate fraction based on that.
This functionality is required by the FITS unit format generator,
since the FITS unit format handles only rational, not decimal point,
powers.
"""
from __future__ import absolute_import
import sys
if sys.version_info[:2] == (2, 6):
from ._fractions_py2 import *
else:
from fractions import *
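# Informal illustration of the behaviour the backport exists for (comments only):
# on Python 2.7+ both calls below work, while the stock 2.6 module cannot build a
# Fraction directly from a float.
#
#   >>> Fraction(0.25)
#   Fraction(1, 4)
#   >>> Fraction(0.1).limit_denominator(10)
#   Fraction(1, 10)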
| piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/utils/compat/fractions.py | Python | mit | 568 |
# -*- coding: utf-8 -*-
import os.path
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as django_settings
from django.db.models import signals
from know.plugins.attachments import settings
from know import managers
from know.models.pluginbase import ReusablePlugin
from know.models.article import BaseRevisionMixin
class IllegalFileExtension(Exception):
"""File extension on upload is not allowed"""
pass
class Attachment(ReusablePlugin):
objects = managers.ArticleFkManager()
current_revision = models.OneToOneField(
'AttachmentRevision',
verbose_name=_(u'current revision'),
blank=True,
null=True,
related_name='current_set',
help_text=_(u'The revision of this attachment currently in use (on all articles using the attachment)'),
)
original_filename = models.CharField(
max_length=256,
verbose_name=_(u'original filename'),
blank=True,
null=True,
)
def can_write(self, **kwargs):
user = kwargs.get('user', None)
if not settings.ANONYMOUS and (not user or user.is_anonymous()):
return False
return ReusablePlugin.can_write(self, **kwargs)
def can_delete(self, user):
return self.can_write(user=user)
class Meta:
verbose_name = _(u'attachment')
verbose_name_plural = _(u'attachments')
app_label = settings.APP_LABEL
def __unicode__(self):
return "%s: %s" % (self.article.current_revision.title, self.original_filename)
def extension_allowed(filename):
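    """Return the extension of ``filename`` if it is allowed, otherwise raise IllegalFileExtension."""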
try:
extension = filename.split(".")[-1]
except IndexError:
# No extension
raise IllegalFileExtension("No file extension found in filename. That's not okay!")
    if extension.lower() not in [ext.lower() for ext in settings.FILE_EXTENSIONS]:
raise IllegalFileExtension("The following filename is illegal: %s. Extension has to be one of %s" %
(filename, ", ".join(settings.FILE_EXTENSIONS)))
return extension
def upload_path(instance, filename):
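    """Build the storage path for an attachment revision file: the extension must match
    the attachment's original filename, and the path may be obscured with a random
    md5 directory component and suffixed with '.upload' depending on settings."""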
from os import path
extension = extension_allowed(filename)
# Has to match original extension filename
if instance.id and instance.attachment and instance.attachment.original_filename:
original_extension = instance.attachment.original_filename.split(".")[-1]
        if extension.lower() != original_extension:
raise IllegalFileExtension("File extension has to be '%s', not '%s'." %
(original_extension, extension.lower()))
elif instance.attachment:
instance.attachment.original_filename = filename
upload_path = settings.UPLOAD_PATH
upload_path = upload_path.replace('%aid', str(instance.attachment.article.id))
if settings.UPLOAD_PATH_OBSCURIFY:
import random
import hashlib
m = hashlib.md5(str(random.randint(0, 100000000000000)))
upload_path = path.join(upload_path, m.hexdigest())
if settings.APPEND_EXTENSION:
filename += '.upload'
return path.join(upload_path, filename)
class AttachmentRevision(BaseRevisionMixin, models.Model):
attachment = models.ForeignKey('Attachment')
file = models.FileField(
upload_to=upload_path,
max_length=255,
verbose_name=_(u'file'),
storage=settings.STORAGE_BACKEND,
)
description = models.TextField(
blank=True,
)
class Meta:
verbose_name = _(u'attachment revision')
verbose_name_plural = _(u'attachment revisions')
ordering = ('created',)
get_latest_by = ('revision_number',)
app_label = settings.APP_LABEL
def get_filename(self):
"""Used to retrieve the filename of a revision.
But attachment.original_filename should always be used in the frontend
such that filenames stay consistent."""
# TODO: Perhaps we can let file names change when files are replaced?
if not self.file:
return None
filename = self.file.name.split("/")[-1]
return ".".join(filename.split(".")[:-1])
def get_size(self):
"""Used to retrieve the file size and not cause exceptions."""
try:
return self.file.size
except OSError:
return None
except ValueError:
return None
def save(self, *args, **kwargs):
if (not self.id and
not self.previous_revision and
self.attachment and
self.attachment.current_revision and
self.attachment.current_revision != self):
self.previous_revision = self.attachment.current_revision
if not self.revision_number:
try:
previous_revision = self.attachment.attachmentrevision_set.latest()
self.revision_number = previous_revision.revision_number + 1
# NB! The above should not raise the below exception, but somehow it does.
            except (AttachmentRevision.DoesNotExist, Attachment.DoesNotExist):
self.revision_number = 1
super(AttachmentRevision, self).save(*args, **kwargs)
if not self.attachment.current_revision:
# If I'm saved from Django admin, then article.current_revision is me!
self.attachment.current_revision = self
self.attachment.save()
def __unicode__(self):
return "%s: %s (r%d)" % (self.attachment.article.current_revision.title,
self.attachment.original_filename,
self.revision_number)
def on_revision_delete(instance, *args, **kwargs):
if not instance.file:
return
# Remove file
path = instance.file.path.split("/")[:-1]
instance.file.delete(save=False)
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
if len(os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path))) == 0:
os.rmdir(delete_path)
except OSError:
# Raised by os.listdir if directory is missing
pass
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)
| indexofire/gork | src/gork/application/know/plugins/attachments/models.py | Python | mit | 6,582 |
# reads uniprot core file and generates core features
from features_helpers import score_differences
def build_uniprot_to_index_to_core(sable_db_obj):
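    """Parse the SABLE core file into a nested dict: {uniprot_id: {residue_index: core_label}}."""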
uniprot_to_index_to_core = {}
for line in sable_db_obj:
tokens = line.split()
try:
# PARSING ID
prot = tokens[0]
index = int(tokens[1])
core = tokens[2]
# PARSING ID
            if prot in uniprot_to_index_to_core:
uniprot_to_index_to_core[prot][index] = core
else:
uniprot_to_index_to_core[prot] = {index: core}
except ValueError:
print "Cannot parse: " + line[0:len(line) - 1]
return uniprot_to_index_to_core
def get_sable_scores(map_file, f_sable_db_location, uniprot_core_output_location):
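    """For every record in the map file, write the core-difference scores of the
    three sub-regions (C1, A, C2) and of the full-length canonical protein."""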
map_file_obj = open(map_file, 'r')
sable_db_obj = open(f_sable_db_location, 'r')
write_to = open(uniprot_core_output_location, 'w')
uniprot_to_index_to_core = build_uniprot_to_index_to_core(sable_db_obj)
for line in map_file_obj:
tokens = line.split()
asid = tokens[0].split("_")[0]
prot = tokens[1]
sstart = int(tokens[2])
start = int(tokens[3])
end = int(tokens[4])
eend = int(tokens[5])
rough_a_length = int(int(tokens[0].split("_")[-1].split("=")[1]) / 3)
if asid[0] == "I":
rough_a_length = 0
c1_count = 0
a_count = 0
c2_count = 0
canonical_absolute = 0
if prot in uniprot_to_index_to_core:
c1_count = score_differences(uniprot_to_index_to_core, prot, sstart, start)
a_count = score_differences(uniprot_to_index_to_core, prot, start, end)
c2_count = score_differences(uniprot_to_index_to_core, prot, end, eend)
prot_len = int(line.split("\t")[7].strip())
canonical_absolute = score_differences(uniprot_to_index_to_core, prot, 1, prot_len)
print >> write_to, tokens[0] + "\t" + prot + "\t" + repr(c1_count) + "\t" + repr(a_count) + "\t" + repr(
c2_count) + "\t" + repr(canonical_absolute)
write_to.close() | wonjunetai/pulse | features/uniprot_core.py | Python | mit | 2,151 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
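# Consensus limit on the number of legacy signature operations allowed in a block.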
MAX_BLOCK_SIGOPS = 20000
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
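        # Encode the tx count as a bloated 9-byte varint (0xff marker followed by an 8-byte length) instead of the canonical compact form.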
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b0 = self.next_block(0)
self.save_spendable_output()
self.sync_blocks([b0])
# Allow the block to mature
blocks = []
for i in range(99):
blocks.append(self.next_block(5000 + i))
self.save_spendable_output()
self.sync_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.sync_blocks([b1, b2])
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.sync_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.sync_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.sync_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.sync_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.sync_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.sync_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.sync_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.sync_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.sync_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[7])
self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase')
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.sync_blocks([b21], False)
b22 = self.next_block(22, spend=out[5])
self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase')
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
self.sync_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.sync_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.sync_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True)
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.sync_blocks([b29], False)
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.sync_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.sync_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.sync_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.sync_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = b37.vtx[1]
tx = self.create_and_sign_transaction(out[11], 0)
b37 = self.update_block(37, [tx])
self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend, 0, 1, p2sh_script)
tx.vout.append(CTxOut(spend.vout[0].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = self.update_block(39, [])
self.sync_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.sync_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.sync_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.sync_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15], 0, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True)
self.log.info("Reject a block with invalid work")
self.move_tip(44)
b47 = self.next_block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target:
b47.nNonce += 1
b47.rehash()
self.sync_blocks([b47], False, request_block=False)
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
b48 = self.next_block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
self.sync_blocks([b48], False, request_block=False)
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True)
self.log.info("Reject a block with incorrect POW limit")
self.move_tip(44)
b50 = self.next_block(50)
b50.nBits = b50.nBits - 1
b50.solve()
self.sync_blocks([b50], False, request_block=False, reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.sync_blocks([b53], False)
self.save_spendable_output()
self.log.info("Reject a block with timestamp before MedianTimePast")
b54 = self.next_block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
self.sync_blocks([b54], False, request_block=False)
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.sync_blocks([b55], True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip("57p2")
self.sync_blocks([b57p2], True)
self.move_tip(57)
self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60, spend=out[17])
self.sync_blocks([b60], True)
self.save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
b61 = self.next_block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True)
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip(60)
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal')
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip(60)
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal')
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
        # b64 is a good block (same as b64a but w/ canonical varint)
#
self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
self.move_tip(60)
regular_block = self.next_block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
self.sync_blocks([b64a], False, 1, b'error parsing message')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
# resend the header message, it won't send us the getdata message again. Just
# disconnect and reconnect and then call sync_blocks.
# TODO: improve this test to be less dependent on P2P DOS behaviour.
node.disconnect_p2ps()
self.reconnect_p2p()
self.move_tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.sync_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
self.sync_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction creted in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
self.update_block(69, [tx])
self.sync_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True)
self.move_tip(72)
self.sync_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True)
        # b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b74 = self.update_block(74, [tx])
self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b75 = self.update_block(75, [tx])
self.sync_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
b76 = self.update_block(76, [tx])
self.sync_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.sync_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.sync_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.sync_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.sync_blocks([b80], False, request_block=False)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.sync_blocks([b81], False, request_block=False) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.sync_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.sync_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29])
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.sync_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.sync_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.sync_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.sync_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.sync_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
LARGE_REORG_SIZE = 1088
blocks = []
spend = out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
self.sync_blocks(blocks, True, timeout=180)
chain1_tip = i
# now create alt chain of same length
self.move_tip(88)
blocks2 = []
for i in range(89, LARGE_REORG_SIZE + 89):
blocks2.append(self.next_block("alt" + str(i)))
self.sync_blocks(blocks2, False, request_block=False)
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1))
self.sync_blocks([block], True, timeout=180)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1)
self.sync_blocks([block], False, request_block=False)
block = self.next_block(chain1_tip + 2)
self.sync_blocks([block], True, timeout=180)
# Helper methods
################
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend is None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.vout[0].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = self.create_tx(spend, 0, 1, script) # spend 1 satoshi
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
self.log.debug("saving spendable output %s" % self.tip.vtx[0])
self.spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0])
return self.spendable_outputs.pop(0).vtx[0]
# move the tip back to a previous block
def move_tip(self, number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
def bootstrap_p2p(self):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def reconnect_p2p(self):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout)
if reconnect:
self.reconnect_p2p()
if __name__ == '__main__':
FullBlockTest().main()
| Bushstar/UFO-Project | test/functional/feature_block.py | Python | mit | 60,904 |
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815CheckAccountNameAvailableRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AccountName = None
self.DBInstanceId = None
self.resourceOwnerAccount = None
def getapiname(self):
return 'rds.aliyuncs.com.CheckAccountNameAvailable.2014-08-15'
| francisar/rds_manager | aliyun/api/rest/Rds20140815CheckAccountNameAvailableRequest.py | Python | mit | 408 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Idea.color'
db.add_column(u'brainstorming_idea', 'color',
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Idea.color'
db.delete_column(u'brainstorming_idea', 'color')
models = {
u'brainstorming.brainstorming': {
'Meta': {'ordering': "['-created']", 'object_name': 'Brainstorming'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'brainstorming.brainstormingwatcher': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('brainstorming', 'email'),)", 'object_name': 'BrainstormingWatcher'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.emailverification': {
'Meta': {'ordering': "['-created']", 'object_name': 'EmailVerification'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'brainstorming.idea': {
'Meta': {'ordering': "['-created']", 'object_name': 'Idea'},
'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'creator_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'ratings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['brainstorming'] | atizo/braindump | brainstorming/migrations/0005_auto__add_field_idea_color.py | Python | mit | 4,031 |
import os
import webapp2
from actions import cronActions
from views import views
import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': 'echo_sense_session'
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
], debug=True, config=config)
| onejgordon/action-potential | actionpotential.py | Python | mit | 1,267 |
# this is the interface for `python archiver`
import archiver
import appdirs
import os
import sys
import pickle
import json
from archiver.archiver import Archiver
from archiver.parser import parseArgs
args = parseArgs()
from edit import edit
# ==============================================
print args
# TODO: see http://stackoverflow.com/questions/13168083/python-raw-input-replacement-that-uses-a-configurable-text-editor
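# Typical invocations (illustrative; the exact flag spellings live in
# archiver/parser.py, which is not shown here):
#   python archiver create myindex
#   python archiver use myindex
#   python archiver add -s ~/Documents
#   python archiver update
#   python archiver list --files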
#-- import pdb
#-- pdb.set_trace()
# ------------------------------------------------------------
# load the user data
# ------------------------------------------------------------
# get the user data directory
user_data_dir = appdirs.user_data_dir('FileArchiver', 'jdthorpe')
if not os.path.exists(user_data_dir) :
os.makedirs(user_data_dir)
# LOAD THE INDEX NAMES AND ACTIVE INDEX
indexes_path = os.path.join(user_data_dir,'INDEXES.json')
if os.path.exists(indexes_path):
with open(indexes_path,'rb') as fh:
indexes = json.load(fh)
else:
indexes= {'active':None,'names':[]}
if not os.path.exists(user_data_dir):
os.makedirs(user_data_dir)
def dumpIndexes():
with open(indexes_path,'wb') as fh:
json.dump(indexes,fh)
# ------------------------------------------------------------
# ------------------------------------------------------------
def getActiveName():
# ACTIVE INDEX NUMER
activeIndex = indexes['active']
    if activeIndex is None:
        print "No active index. Use 'list -i' to list available indexes and 'use' to set an active index."
sys.exit()
# GET THE NAME OF THE INDEX
try:
activeIndexName = indexes['names'][indexes['active']]
except:
print "Invalid index number"
sys.exit()
return activeIndexName
# ------------------------------------------------------------
# READ-WRITE UTILITY FUNCTIONS
# ------------------------------------------------------------
# TODO: catch specific excepitons:
# except IOError:
# # no such file
# except ValueError as e:
# # invalid json file
def readSettings(name):
""" A utility function which loads the index settings from file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'rb') as fh:
settings = json.load(fh)
except Exception as e:
print "Error reading index settings"
import pdb
pdb.set_trace()
sys.exit()
return settings
def readData(name):
""" A utility function which loads the index data from file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'rb') as fh: data = pickle.load(fh)
except Exception as e:
print "Error reading index data"
import pdb
pdb.set_trace()
sys.exit()
return data
def dumpSettings(settings,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'wb') as fh:
json.dump(settings,fh)
except Exception as e:
print "Error writing index settings"
import pdb
pdb.set_trace()
sys.exit()
def dumpData(data,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'wb') as fh:
pickle.dump(data,fh)
except:
print "Error writing index data"
import pdb
pdb.set_trace()
sys.exit()
# ------------------------------------------------------------
# ------------------------------------------------------------
if args.command == 'add':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
source = os.path.abspath(args.source)
if not os.path.exists(source):
print 'WARNING: no such directory "%s"'%(source)
elif not os.path.isdir(source):
print 'ERROR: "%s" is not a directory'%(source)
sys.exit()
print 'Adding source directory: %s'%(source)
        if not any(os.path.samefile(source, f) for f in settings['sourceDirectories']):
settings['sourceDirectories'].append(source)
    elif args.exclusion is not None:
import re
try:
re.compile(args.exclusion)
except re.error:
print 'Invalid regular expression "%s"'%(args.exclusion)
sys.exit()
if args.noic:
settings['directoryExclusionPatterns'].append(args.exclusion)
else:
settings['directoryExclusionPatterns'].append((args.exclusion,2)) # re.I == 2
elif args.archive is not None:
raise NotImplementedError
        if settings['archiveDirectory'] is not None:
            print "Archive path has already been set. Use 'remove' to delete the archive path before setting a new archive path."
archiveDirectory = os.path.abspath(args.archive)
if not os.path.exists(archiveDirectory):
if args.create :
os.makedirs(archiveDirectory)
else:
print 'ERROR: no such directory "%s"'%(archiveDirectory)
sys.exit()
elif not os.path.isdir(archiveDirectory):
print '"%s" is not a directory'%(archiveDirectory)
sys.exit()
print 'Setting archive directory to: %s'%(archiveDirectory)
settings['archiveDirectory'] = args.archive
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'list':
if args.sources:
for f in readSettings(getActiveName())['sourceDirectories']:
print f
elif args.exclusions:
for f in readSettings(getActiveName())['directoryExclusionPatterns']:
print f
elif args.archive:
print readSettings(getActiveName())['archiveDirectory']
elif args.files:
archiver = Archiver()
archiver.data = readData(getActiveName())
for f in archiver:
print f
elif args.indexes:
print 'Active Index: %s (*)'%(getActiveName())
print 'Index Names: '
for i,name in enumerate(indexes['names']):
print ' %s %i: %s'%(
(' ','*')[(i == indexes['active'])+0],
i+1,
name,
)
else:
print 'Error in Arg Parser'
elif args.command == 'remove':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
        if not (1 <= args.source <= len(settings['sourceDirectories'])):
            print 'Invalid index %i'%(args.source)
            sys.exit()
        del settings['sourceDirectories'][args.source - 1]
elif args.exclusion is not None:
raise NotImplementedError
if not (1 <= args.exclusion <= len(settings['directoryExclusionPatterns'])):
print 'Invalid index %i'%(args.exclusion)
del settings['directoryExclusionPatterns'][args.exclusion - 1]
elif args.archive is not None:
raise NotImplementedError
settings['archiveDirectory'] = None
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'update':
activeName = getActiveName()
settings = readSettings(activeName)
if not len(settings['sourceDirectories']):
print "Error: no source directories in the active index. Please add a source directory via 'add -s'"
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.update()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'clean':
raise NotImplementedError
activeName = getActiveName()
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.clean()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'copy':
raise NotImplementedError
activeName = getActiveName()
    settings = readSettings(activeName)
    if settings['archiveDirectory'] is None:
        print "ERROR: Archive directory not set. Use 'add -a' to set the archive directory."
sys.exit()
Index(
settings = settings,
data = readData(activeName)).copy()
elif args.command == 'diskimages':
raise NotImplementedError
    if args.size is None or args.size == "DVD":
        size = int(4.65 * (1 << 30))
    elif args.size == "CD":
        size = 645 * (1 << 20)
    elif args.size == "DVD":
        size = int(4.65 * (1 << 30))
    elif args.size == "DVD-dual":
        size = int(8.5 * (1 << 30))
    elif args.size == "BD":
        size = 25 * (1 << 30)
    elif args.size == "BD-dual":
        size = 50 * (1 << 30)
    elif args.size == "BD-tripple":
        size = 75 * (1 << 30)
    elif args.size == "BD-xl":
        size = 100 * (1 << 30)
else:
try:
size = int(float(args.size))
except:
print 'ERROR: unable to coerce "%s" to float or int'%(args.size)
sys.exit()
activeName = getActiveName()
    settings = readSettings(activeName)
# GET THE DIRECTORY ARGUMENT
if args.directory is not None:
directory = args.directory
else:
        if settings['archiveDirectory'] is None:
            print "ERROR: Archive directory not set and no directory specified. Use 'diskimages -d' to specify the disk image directory or 'add -a' to set the archive directory."
sys.exit()
else:
directory = os.path.join(settings['archiveDirectory'],'Disk Images')
# VALIDATE THE DIRECTORY
if not os.path.exists(directory):
if args.create :
os.makedirs(directory)
else:
print 'ERROR: no such directory "%s"'%(directory)
sys.exit()
elif not os.path.isdir(directory):
print '"%s" is not a directory'%(directory)
sys.exit()
# get the FPBF argument
if args.fpbf is not None:
FPBF = True
elif args.nofpbf is not None:
FPBF = False
else:
FPBF = sys.platform == 'darwin'
Index( settings = settings,
data = readData(activeName)).diskimages(directory,size,FPBF)
elif args.command == 'settings':
activeName = getActiveName()
if args.export is not None:
raise NotImplementedError
        with open(args.export,'wb') as fh:
            json.dump(readSettings(activeName),fh,indent=2,separators=(',', ': '))
    elif args.load is not None:
        raise NotImplementedError
        with open(args.load,'rb') as fh:
            settings = json.load(fh)
# give a chance for the settings to be validated
try:
archiver = Archiver(settings=settings)
except:
print "ERROR: invalid settings file"
dumpSettings(archiver.settings,args.name)
elif args.edit is not None:
settings = readSettings(activeName)
old = settings['identifierSettings'][args.edit]
new = edit(json.dumps(old,indent=2,separators=(',', ': ')))
settings['identifierSettings'][args.edit]= json.loads(new)
dumpSettings(settings,activeName)
else :
print json.dumps(readSettings(activeName),indent=2,separators=(',', ': '))
elif args.command == 'create':
if args.name in indexes['names']:
print "An index by the name '%s' already exists"%(args.name)
sys.exit()
import re
validater = re.compile(r'^[-() _a-zA-Z0-9](?:[-() _.a-zA-Z0-9]+[-() _a-zA-Z0-9])$')
    if validater.match(args.name) is None:
        print "ERROR: names must be composed of letters, numbers, hyphen, underscore, space and dot characters and may not end or begin with a dot"
sys.exit()
    archiver = Archiver()
dumpSettings(archiver.settings,args.name)
dumpData(archiver.data,args.name)
indexes['names'].append(args.name)
dumpIndexes()
# TODO: check if there are no other indexies. if so, make the new one active.
print "Created index '%s'"%(args.name)
elif args.command == 'save':
raise NotImplementedError
Index( settings = readSettings(getActiveName()),
data = readData(getActiveName())).save(args.filename)
elif args.command == 'use':
print indexes['names']
if not args.name in indexes['names']:
print "ERROR: No such index named '%s'"%(args.name)
sys.exit()
indexes['active'] =indexes['names'].index(args.name)
dumpIndexes()
elif args.command == 'delete':
if not args.name in indexes['names']:
print "ERROR: No such index named '%s'"%(args.name)
sys.exit()
nameIindex = indexes['names'].index(args.name)
if indexes['active'] == nameIindex:
print 'WARNING: deleting active index'
indexes['active'] = None
del indexes['names'][nameIindex]
dumpIndexes()
else :
print "unknown command %s"%(args.command)
| jdthorpe/archiver | __main__.py | Python | mit | 13,106 |
"""
.. module:: mlpy.auxiliary.datastructs
:platform: Unix, Windows
:synopsis: Provides data structure implementations.
.. moduleauthor:: Astrid Jackson <[email protected]>
"""
from __future__ import division, print_function, absolute_import
import heapq
import numpy as np
from abc import ABCMeta, abstractmethod
class Array(object):
"""The managed array class.
The managed array class pre-allocates memory to the given size
automatically resizing as needed.
Parameters
----------
size : int
The size of the array.
Examples
--------
>>> a = Array(5)
>>> a[0] = 3
>>> a[1] = 6
    Retrieving an element:
>>> a[0]
3
>>> a[2]
0
Finding the length of the array:
>>> len(a)
2
"""
def __init__(self, size):
self._data = np.zeros((size,))
self._capacity = size
self._size = 0
def __setitem__(self, index, value):
"""Set the the array at the index to the given value.
Parameters
----------
index : int
The index into the array.
value :
The value to set the array to.
"""
if index >= self._size:
if self._size == self._capacity:
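                # backing buffer is full: double the capacity and copy the
                # existing values into the new buffer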
self._capacity *= 2
new_data = np.zeros((self._capacity,))
new_data[:self._size] = self._data
self._data = new_data
self._size += 1
self._data[index] = value
def __getitem__(self, index):
"""Get the value at the given index.
Parameters
----------
index : int
The index into the array.
"""
return self._data[index]
def __len__(self):
"""The length of the array.
Returns
-------
int :
The size of the array
"""
return self._size
class Point2D(object):
"""The 2d-point class.
The 2d-point class is a container for positions
in a 2d-coordinate system.
Parameters
----------
x : float, optional
The x-position in a 2d-coordinate system. Default is 0.0.
y : float, optional
The y-position in a 2d-coordinate system. Default is 0.0.
Attributes
----------
x : float
The x-position in a 2d-coordinate system.
y : float
The y-position in a 2d-coordinate system.
"""
__slots__ = ['x', 'y']
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Point3D(object):
"""
The 3d-point class.
The 3d-point class is a container for positions
in a 3d-coordinate system.
Parameters
----------
    x : float, optional
        The x-position in a 3d-coordinate system. Default is 0.0.
    y : float, optional
        The y-position in a 3d-coordinate system. Default is 0.0.
    z : float, optional
        The z-position in a 3d-coordinate system. Default is 0.0.
    Attributes
    ----------
    x : float
        The x-position in a 3d-coordinate system.
    y : float
        The y-position in a 3d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
__slots__ = ['x', 'y', 'z']
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
class Vector3D(Point3D):
"""The 3d-vector class.
.. todo::
Implement vector functionality.
Parameters
----------
    x : float, optional
        The x-position in a 3d-coordinate system. Default is 0.0.
    y : float, optional
        The y-position in a 3d-coordinate system. Default is 0.0.
    z : float, optional
        The z-position in a 3d-coordinate system. Default is 0.0.
    Attributes
    ----------
    x : float
        The x-position in a 3d-coordinate system.
    y : float
        The y-position in a 3d-coordinate system.
z : float
The z-position in a 3d-coordinate system.
"""
def __init__(self, x=0.0, y=0.0, z=0.0):
super(Vector3D, self).__init__(x, y, z)
class Queue(object):
"""The abstract queue base class.
The queue class handles core functionality common for
any type of queue. All queues inherit from the queue
base class.
See Also
--------
:class:`FIFOQueue`, :class:`PriorityQueue`
"""
__metaclass__ = ABCMeta
def __init__(self):
self._queue = []
def __len__(self):
return len(self._queue)
def __contains__(self, item):
try:
self._queue.index(item)
return True
except Exception:
return False
def __iter__(self):
return iter(self._queue)
def __str__(self):
return '[' + ', '.join('{}'.format(el) for el in self._queue) + ']'
def __repr__(self):
return ', '.join('{}'.format(el) for el in self._queue)
@abstractmethod
def push(self, item):
"""Push a new element on the queue
Parameters
----------
item :
The element to push on the queue
"""
raise NotImplementedError
@abstractmethod
def pop(self):
"""Pop an element from the queue."""
raise NotImplementedError
def empty(self):
"""Check if the queue is empty.
Returns
-------
bool :
Whether the queue is empty.
"""
return len(self._queue) <= 0
def extend(self, items):
"""Extend the queue by a number of elements.
Parameters
----------
items : list
A list of items.
"""
for item in items:
self.push(item)
def get(self, item):
"""Return the element in the queue identical to `item`.
Parameters
----------
item :
The element to search for.
Returns
-------
The element in the queue identical to `item`. If the element
was not found, None is returned.
"""
try:
index = self._queue.index(item)
return self._queue[index]
except Exception:
return None
def remove(self, item):
"""Remove an element from the queue.
Parameters
----------
item :
The element to remove.
"""
self._queue.remove(item)
class FIFOQueue(Queue):
"""The first-in-first-out (FIFO) queue.
In a FIFO queue the first element added to the queue
is the first element to be removed.
Examples
--------
>>> q = FIFOQueue()
>>> q.push(5)
>>> q.extend([1, 3, 7])
>>> print q
[5, 1, 3, 7]
Retrieving an element:
>>> q.pop()
5
Removing an element:
>>> q.remove(3)
>>> print q
[1, 7]
Get the element in the queue identical to the given item:
>>> q.get(7)
7
Check if the queue is empty:
>>> q.empty()
False
Loop over the elements in the queue:
>>> for x in q:
>>> print x
1
7
Check if an element is in the queue:
>>> if 7 in q:
>>> print "yes"
yes
See Also
--------
:class:`PriorityQueue`
"""
def __init__(self):
super(FIFOQueue, self).__init__()
def push(self, item):
"""Push an element to the end of the queue.
Parameters
----------
item :
The element to append.
"""
self._queue.append(item)
def pop(self):
"""Return the element at the front of the queue.
Returns
-------
The first element in the queue.
"""
return self._queue.pop(0)
def extend(self, items):
"""Append a list of elements at the end of the queue.
Parameters
----------
items : list
List of elements.
"""
self._queue.extend(items)
class PriorityQueue(Queue):
"""
The priority queue.
In a priority queue each element has a priority associated with it. An element
with high priority (i.e., smallest value) is served before an element with low priority
(i.e., largest value). The priority queue is implemented with a heap.
Parameters
----------
func : callable
A callback function handling the priority. By default the priority
is the value of the element.
Examples
--------
>>> q = PriorityQueue()
>>> q.push(5)
>>> q.extend([1, 3, 7])
>>> print q
[(1,1), (5,5), (3,3), (7,7)]
Retrieving the element with highest priority:
>>> q.pop()
1
Removing an element:
>>> q.remove((3, 3))
>>> print q
[(5,5), (7,7)]
Get the element in the queue identical to the given item:
>>> q.get(7)
7
Check if the queue is empty:
>>> q.empty()
False
Loop over the elements in the queue:
>>> for x in q:
>>> print x
(5, 5)
(7, 7)
Check if an element is in the queue:
>>> if 7 in q:
>>> print "yes"
yes
See Also
--------
:class:`FIFOQueue`
"""
def __init__(self, func=lambda x: x):
super(PriorityQueue, self).__init__()
self.func = func
def __contains__(self, item):
for _, element in self._queue:
if item == element:
return True
return False
def __str__(self):
return '[' + ', '.join('({},{})'.format(*el) for el in self._queue) + ']'
def push(self, item):
"""Push an element on the priority queue.
The element is pushed on the priority queue according
to its priority.
Parameters
----------
item :
The element to push on the queue.
"""
heapq.heappush(self._queue, (self.func(item), item))
def pop(self):
"""Get the element with the highest priority.
Get the element with the highest priority (i.e., smallest value).
Returns
-------
The element with the highest priority.
"""
return heapq.heappop(self._queue)[1]
def get(self, item):
"""Return the element in the queue identical to `item`.
Parameters
----------
item :
The element to search for.
Returns
-------
The element in the queue identical to `item`. If the element
was not found, None is returned.
"""
for _, element in self._queue:
if item == element:
return element
return None
def remove(self, item):
"""Remove an element from the queue.
Parameters
----------
item :
The element to remove.
"""
super(PriorityQueue, self).remove(item)
heapq.heapify(self._queue)
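if __name__ == "__main__":
    # Usage sketch: order items by an explicit priority field through the
    # `func` callback (smaller key pops first); the task tuples are made up.
    tasks = PriorityQueue(func=lambda task: task[0])
    tasks.extend([(2, "write report"), (1, "fix bug"), (3, "refactor")])
    while not tasks.empty():
        print(tasks.pop())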
| evenmarbles/mlpy | mlpy/auxiliary/datastructs.py | Python | mit | 10,818 |
import unittest
import numpy as np
from bayesnet.image.util import img2patch, patch2img
class TestImg2Patch(unittest.TestCase):
def test_img2patch(self):
img = np.arange(16).reshape(1, 4, 4, 1)
patch = img2patch(img, size=3, step=1)
expected = np.asarray([
[img[0, 0:3, 0:3, 0], img[0, 0:3, 1:4, 0]],
[img[0, 1:4, 0:3, 0], img[0, 1:4, 1:4, 0]]
])
expected = expected[None, ..., None]
self.assertTrue((patch == expected).all())
imgs = [
np.random.randn(2, 5, 6, 3),
np.random.randn(3, 10, 10, 2),
np.random.randn(1, 23, 17, 5)
]
sizes = [
(1, 1),
2,
(3, 4)
]
steps = [
(1, 2),
(3, 1),
3
]
shapes = [
(2, 5, 3, 1, 1, 3),
(3, 3, 9, 2, 2, 2),
(1, 7, 5, 3, 4, 5)
]
for img, size, step, shape in zip(imgs, sizes, steps, shapes):
self.assertEqual(shape, img2patch(img, size, step).shape)
class TestPatch2Img(unittest.TestCase):
def test_patch2img(self):
img = np.arange(16).reshape(1, 4, 4, 1)
patch = img2patch(img, size=2, step=2)
self.assertTrue((img == patch2img(patch, (2, 2), (1, 4, 4, 1))).all())
patch = img2patch(img, size=3, step=1)
expected = np.arange(0, 32, 2).reshape(1, 4, 4, 1)
expected[0, 0, 0, 0] /= 2
expected[0, 0, -1, 0] /= 2
expected[0, -1, 0, 0] /= 2
expected[0, -1, -1, 0] /= 2
expected[0, 1:3, 1:3, 0] *= 2
self.assertTrue((expected == patch2img(patch, (1, 1), (1, 4, 4, 1))).all())
if __name__ == '__main__':
unittest.main()
| ctgk/BayesianNetwork | test/image/test_util.py | Python | mit | 1,753 |
'''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
    event_dict['timestamp'] = TZ.localize(now, True).astimezone(
        pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
) | Kbman99/NetSecShare | app/logger_setup.py | Python | mit | 2,739 |
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="textfont", parent_name="scattersmith", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
""",
),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scattersmith/_textfont.py | Python | mit | 1,869 |
from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
try:
from urllib import quote_plus # python 2
except:
pass
try:
from urllib.parse import quote_plus # python 3
except:
pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
def post_create(request):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "post_form.html", context)
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
share_string = quote_plus(instance.content)
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid() and request.user.is_authenticated():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
comments = instance.comments
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comments": comments,
"comment_form": form,
}
return render(request, "post_detail.html", context)
def post_list(request):
today = timezone.now().date()
queryset_list = Post.objects.active() # .order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
query = request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query) |
Q(content__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
    paginator = Paginator(queryset_list, 8)  # Show 8 posts per page
page_request_var = "page"
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
queryset = paginator.page(paginator.num_pages)
context = {
"object_list": queryset,
"title": "List",
"page_request_var": page_request_var,
"today": today,
}
return render(request, "post_list.html", context)
def post_update(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None,
request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item</a> Saved",
extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post_form.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
instance.delete()
messages.success(request, "Successfully deleted")
return redirect("posts:list")
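# Illustrative URL wiring for these views (a sketch; the real posts/urls.py is
# not part of this file, and the regexes and route names are assumptions --
# only the "posts" namespace and the slug keyword come from the code above):
#   from django.conf.urls import url
#   from . import views
#   urlpatterns = [
#       url(r'^$', views.post_list, name='list'),
#       url(r'^create/$', views.post_create, name='create'),
#       url(r'^(?P<slug>[\w-]+)/$', views.post_detail, name='detail'),
#       url(r'^(?P<slug>[\w-]+)/edit/$', views.post_update, name='update'),
#       url(r'^(?P<slug>[\w-]+)/delete/$', views.post_delete, name='delete'),
#   ]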
| our-iot-project-org/pingow-web-service | src/posts/views.py | Python | mit | 5,217 |
from ..cw_model import CWModel
class Order(CWModel):
def __init__(self, json_dict=None):
self.id = None # (Integer)
self.company = None # *(CompanyReference)
self.contact = None # (ContactReference)
self.phone = None # (String)
self.phoneExt = None # (String)
self.email = None # (String)
self.site = None # (SiteReference)
self.status = None # *(OrderStatusReference)
self.opportunity = None # (OpportunityReference)
self.orderDate = None # (String)
self.dueDate = None # (String)
self.billingTerms = None # (BillingTermsReference)
self.taxCode = None # (TaxCodeReference)
self.poNumber = None # (String(50))
self.locationId = None # (Integer)
self.businessUnitId = None # (Integer)
self.salesRep = None # *(MemberReference)
self.notes = None # (String)
self.billClosedFlag = None # (Boolean)
self.billShippedFlag = None # (Boolean)
self.restrictDownpaymentFlag = None # (Boolean)
self.description = None # (String)
self.topCommentFlag = None # (Boolean)
self.bottomCommentFlag = None # (Boolean)
self.shipToCompany = None # (CompanyReference)
self.shipToContact = None # (ContactReference)
self.shipToSite = None # (SiteReference)
self.billToCompany = None # (CompanyReference)
self.billToContact = None # (ContactReference)
self.billToSite = None # (SiteReference)
self.productIds = None # (Integer[])
self.documentIds = None # (Integer[])
self.invoiceIds = None # (Integer[])
self.configIds = None # (Integer[])
self.total = None # (Number)
self.taxTotal = None # (Number)
self._info = None # (Metadata)
# initialize object with json dict
super().__init__(json_dict)
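# Usage sketch (illustrative): fields marked with * above are required by
# ConnectWise; the reference dicts below are placeholders, not real records.
if __name__ == '__main__':
    order = Order()
    order.company = {'id': 1, 'identifier': 'ExampleCo'}
    order.status = {'id': 1, 'name': 'Open'}
    order.salesRep = {'id': 1, 'identifier': 'jsmith'}
    order.poNumber = 'PO-0001'
    print(order.poNumber)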
| joshuamsmith/ConnectPyse | sales/order.py | Python | mit | 1,974 |
from pydispatch import dispatcher
from PySide import QtCore, QtGui
import cbpos
logger = cbpos.get_logger(__name__)
from .page import BasePage
class MainWindow(QtGui.QMainWindow):
__inits = []
def __init__(self):
super(MainWindow, self).__init__()
self.tabs = QtGui.QTabWidget(self)
self.tabs.setTabsClosable(False)
self.tabs.setIconSize(QtCore.QSize(32, 32))
self.tabs.currentChanged.connect(self.onCurrentTabChanged)
self.toolbar = self.addToolBar('Base')
self.toolbar.setIconSize(QtCore.QSize(48,48)) #Suitable for touchscreens
self.toolbar.setObjectName('BaseToolbar')
toolbarStyle = cbpos.config['menu', 'toolbar_style']
# The index in this list is the same as that in the configuration page
available_styles = (
QtCore.Qt.ToolButtonFollowStyle,
QtCore.Qt.ToolButtonIconOnly,
QtCore.Qt.ToolButtonTextOnly,
QtCore.Qt.ToolButtonTextBesideIcon,
QtCore.Qt.ToolButtonTextUnderIcon,
)
try:
toolbarStyle = available_styles[int(toolbarStyle)]
except (ValueError, TypeError, IndexError):
toolbarStyle = QtCore.Qt.ToolButtonFollowStyle
self.toolbar.setToolButtonStyle(toolbarStyle)
self.setCentralWidget(self.tabs)
self.statusBar().showMessage(cbpos.tr._('Coinbox POS is ready.'))
self.setWindowTitle('Coinbox')
self.callInit()
self.loadToolbar()
self.loadMenu()
def loadToolbar(self):
"""
Loads the toolbar actions, restore toolbar state, and restore window geometry.
"""
mwState = cbpos.config['mainwindow', 'state']
mwGeom = cbpos.config['mainwindow', 'geometry']
for act in cbpos.menu.actions:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(act.icon), act.label, self)
action.setShortcut(act.shortcut)
action.triggered.connect(act.trigger)
self.toolbar.addAction(action)
#Restores the saved mainwindow's toolbars and docks, and then the window geometry.
if mwState is not None:
self.restoreState( QtCore.QByteArray.fromBase64(mwState) )
if mwGeom is not None:
self.restoreGeometry( QtCore.QByteArray.fromBase64(mwGeom) )
else:
self.setGeometry(0, 0, 800, 600)
def loadMenu(self):
"""
Load the menu root items and items into the QTabWidget with the appropriate pages.
"""
show_empty_root_items = cbpos.config['menu', 'show_empty_root_items']
show_disabled_items = cbpos.config['menu', 'show_disabled_items']
hide_tab_bar = not cbpos.config['menu', 'show_tab_bar']
if hide_tab_bar:
# Hide the tab bar and prepare the toolbar for extra QAction's
self.tabs.tabBar().hide()
# This pre-supposes that the menu items will come after the actions
self.toolbar.addSeparator()
for root in cbpos.menu.items:
if not root.enabled and not show_disabled_items:
continue
if show_disabled_items:
# Show all child items
children = root.children
else:
# Filter out those which are disabled
children = [i for i in root.children if i.enabled]
# Hide empty menu root items
if len(children) == 0 and not show_empty_root_items:
continue
# Add the tab
widget = self.getTabWidget(children)
icon = QtGui.QIcon(root.icon)
index = self.tabs.addTab(widget, icon, root.label)
widget.setEnabled(root.enabled)
# Add the toolbar action if enabled
if hide_tab_bar:
# TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
action = QtGui.QAction(QtGui.QIcon(icon), root.label, self)
action.onTrigger = lambda n=index: self.tabs.setCurrentIndex(n)
action.triggered.connect(action.onTrigger)
self.toolbar.addAction(action)
def onCurrentTabChanged(self, index, tabs=None):
if tabs is None:
tabs = self.tabs
widget = tabs.widget(index)
try:
signal = widget.shown
except AttributeError:
pass
else:
signal.emit()
def getTabWidget(self, items):
"""
Returns the appropriate window to be placed in the main QTabWidget,
depending on the number of children of a root menu item.
"""
count = len(items)
if count == 0:
# If there are no child items, just return an empty widget
widget = QtGui.QWidget()
widget.setEnabled(False)
return widget
elif count == 1:
# If there is only one item, show it as is.
logger.debug('Loading menu page for %s', items[0].name)
widget = items[0].page()
widget.setEnabled(items[0].enabled)
return widget
else:
# If there are many children, add them in a QTabWidget
tabs = QtGui.QTabWidget()
tabs.currentChanged.connect(lambda i, t=tabs: self.onCurrentTabChanged(i, t))
for item in items:
logger.debug('Loading menu page for %s', item.name)
widget = item.page()
icon = QtGui.QIcon(item.icon)
tabs.addTab(widget, icon, item.label)
widget.setEnabled(item.enabled)
return tabs
def saveWindowState(self):
"""
Saves the main window state (position, size, toolbar positions)
"""
mwState = self.saveState().toBase64()
mwGeom = self.saveGeometry().toBase64()
cbpos.config['mainwindow', 'state'] = unicode(mwState)
cbpos.config['mainwindow', 'geometry'] = unicode(mwGeom)
cbpos.config.save()
def closeEvent(self, event):
"""
Perform necessary operations before closing the window.
"""
self.saveWindowState()
#do any other thing before closing...
event.accept()
@classmethod
def addInit(cls, init):
"""
Adds the `init` method to the list of extensions of the `MainWindow.__init__`.
"""
cls.__inits.append(init)
def callInit(self):
"""
Handle calls to `__init__` methods of extensions of the MainWindow.
"""
for init in self.__inits:
init(self)
| coinbox/coinbox-mod-base | cbmod/base/views/window.py | Python | mit | 7,111 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f')
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=(['odrpack.h'] + odrpack_src),
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/odr/setup.py | Python | mit | 1,419 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time
SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576
# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250
class motorserver(zmqdecorators.service):
def __init__(self, service_name, service_port, serialport):
super(motorserver, self).__init__(service_name, service_port)
self.serial_port = serialport
self.input_buffer = ""
self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
self.last_command_time = time.time()
self.pcb = ioloop_mod.PeriodicCallback(self.check_data_reveived, COMMAND_GRACE_TIME)
self.pcb.start()
def check_data_reveived(self, *args):
if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
self._setspeeds(0,0)
def _setspeeds(self, m1speed, m2speed):
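        # Frame both speeds as 16-bit two's-complement hex, e.g.
        # _setspeeds(100, -100) writes "S0064FF9C\n" to the controller.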
self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))
@zmqdecorators.method()
def setspeeds(self, resp, m1speed, m2speed):
self.last_command_time = time.time()
#print("Got speeds %s,%s" % (m1speed, m2speed))
self._setspeeds(m1speed, m2speed)
# TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
resp.send("ACK")
def handle_serial_event(self, fd, events):
# Copied from arbus that was thread based
if not self.serial_port.inWaiting():
# Don't try to read if there is no data, instead sleep (yield) a bit
time.sleep(0)
return
data = self.serial_port.read(1)
if len(data) == 0:
return
#print("DEBUG: data=%s" % data)
# Put the data into inpit buffer and check for CRLF
self.input_buffer += data
# Trim prefix NULLs and linebreaks
self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
#print "input_buffer=%s" % repr(self.input_buffer)
if ( len(self.input_buffer) > 1
and self.input_buffer[-2:] == "\r\n"):
# Got a message, parse it (sans the CRLF) and empty the buffer
self.message_received(self.input_buffer[:-2])
self.input_buffer = ""
def message_received(self, message):
#print("DEBUG: msg=%s" % message)
try:
# Currently we have no incoming messages from this board
pass
        except Exception as e:
            print("message_received exception: Got exception %s" % repr(e))
# Ignore indexerrors, they just mean we could not parse the command
pass
pass
def cleanup(self):
print("Cleanup called")
self._setspeeds(0,0)
def run(self):
print("Starting motorserver")
super(motorserver, self).run()
if __name__ == "__main__":
import serial
import sys,os
port = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
instance = motorserver(SERVICE_NAME, SERVICE_PORT, port)
instance.run()
| HelsinkiHacklab/urpobotti | python/motorctrl.py | Python | mit | 3,257 |
from rest_framework import serializers
from . import models
class Invoice(serializers.ModelSerializer):
class Meta:
model = models.Invoice
fields = (
'id', 'name', 'additional_infos', 'owner',
'creation_date', 'update_date',
)
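# Illustrative DRF wiring (a sketch; the viewset below is an assumption --
# only the Invoice serializer is actually defined in this module):
#   from rest_framework import viewsets
#   class InvoiceViewSet(viewsets.ModelViewSet):
#       queryset = models.Invoice.objects.all()
#       serializer_class = Invoice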
| linovia/microinvoices | microinvoices/invoices/serializers.py | Python | mit | 281 |
"""Basic thermodynamic calculations for pickaxe."""
from typing import Union
import pint
from equilibrator_api import (
Q_,
ComponentContribution,
Reaction,
default_physiological_ionic_strength,
default_physiological_p_h,
default_physiological_p_mg,
default_physiological_temperature,
)
from equilibrator_api.phased_reaction import PhasedReaction
from equilibrator_assets.compounds import Compound
from equilibrator_assets.local_compound_cache import LocalCompoundCache
from equilibrator_cache.compound_cache import CompoundCache
from pymongo import MongoClient
from sqlalchemy import create_engine
from minedatabase.pickaxe import Pickaxe
class Thermodynamics:
"""Class to calculate thermodynamics of Pickaxe runs.
Thermodynamics allows for the calculation of:
1) Standard ∆G' of formation
2) Standard ∆G'o of reaction
3) Physiological ∆G'm of reaction
4) Adjusted ∆G' of reaction
eQuilibrator objects can also be obtained from r_ids and c_ids.
Parameters
----------
mongo_uri: str
URI of the mongo database.
client: MongoClient
Connection to Mongo.
CC: ComponentContribution
eQuilibrator Component Contribution object to calculate ∆G with.
lc: LocalCompoundCache
The local compound cache to generate eQuilibrator compounds from.
"""
def __init__(
self,
):
# Mongo params
self.mongo_uri = None
self.client = None
self._core = None
# eQ params
self.CC = ComponentContribution()
self.lc = None
self._water = None
def load_mongo(self, mongo_uri: Union[str, None] = None):
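        """Connect to the Mongo instance backing the MINE databases.
        Parameters
        ----------
        mongo_uri : str, optional
            URI of the mongo database, by default None, which falls back to
            localhost:27017.
        """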
if mongo_uri:
self.mongo_uri = mongo_uri
self.client = MongoClient(mongo_uri)
else:
self.mongo_uri = "localhost:27017"
self.client = MongoClient()
self._core = self.client["core"]
def _all_dbs_loaded(self):
if self.client and self._core and self.lc:
return True
else:
print("Load connection to Mongo and eQuilibrator local cache.")
return False
def _eq_loaded(self):
if self.lc:
return True
else:
print("Load eQulibrator local cache.")
return False
def _reset_CC(self):
"""reset CC back to defaults"""
self.CC.p_h = default_physiological_p_h
self.CC.p_mg = default_physiological_p_mg
self.CC.temperature = default_physiological_temperature
self.CC.ionic_strength = default_physiological_ionic_strength
def load_thermo_from_postgres(
self, postgres_uri: str = "postgresql:///eq_compounds"
) -> None:
"""Load a LocalCompoundCache from a postgres uri for equilibrator.
Parameters
----------
postgres_uri : str, optional
uri of the postgres DB to use, by default "postgresql:///eq_compounds"
"""
self.lc = LocalCompoundCache()
self.lc.ccache = CompoundCache(create_engine(postgres_uri))
self._water = self.lc.get_compounds("O")
def load_thermo_from_sqlite(
self, sqlite_filename: str = "compounds.sqlite"
) -> None:
"""Load a LocalCompoundCache from a sqlite file for equilibrator.
compounds.sqlite can be generated through LocalCompoundCache's method
generate_local_cache_from_default_zenodo
Parameters
----------
sqlite_filename: str
filename of the sqlite file to load.
"""
self.lc = LocalCompoundCache()
self.lc.load_cache(sqlite_filename)
self._water = self.lc.get_compounds("O")
def get_eQ_compound_from_cid(
self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[Compound, None]:
"""Get an equilibrator compound for a given c_id from the core.
Attempts to retrieve a compound from the core or a specified db_name.
Parameters
----------
c_id : str
compound ID for MongoDB lookup of a compound.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for compound in before core database, by default None.
Returns
-------
equilibrator_assets.compounds.Compound
eQuilibrator Compound
"""
# Find locally in pickaxe
compound_smiles = None
if pickaxe:
if c_id in pickaxe.compounds:
compound_smiles = pickaxe.compounds[c_id]["SMILES"]
else:
return None
# Find in mongo db
elif self._all_dbs_loaded():
if db_name:
compound = self.client[db_name].compounds.find_one(
{"_id": c_id}, {"SMILES": 1}
)
if compound:
compound_smiles = compound["SMILES"]
# No cpd smiles from database name
if not compound_smiles:
compound = self._core.compounds.find_one({"_id": c_id}, {"SMILES": 1})
if compound:
compound_smiles = compound["SMILES"]
# No compound_smiles at all
if not compound_smiles or "*" in compound_smiles:
return None
else:
eQ_compound = self.lc.get_compounds(
compound_smiles, bypass_chemaxon=True, save_empty_compounds=True
)
return eQ_compound
def standard_dg_formation_from_cid(
self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[float, None]:
"""Get standard ∆Gfo for a compound.
Parameters
----------
c_id : str
Compound ID to get the ∆Gf for.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for compound in before core database, by default None.
Returns
-------
Union[float, None]
∆Gf'o for a compound, or None if unavailable.
"""
eQ_cpd = self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
if not eQ_cpd:
return None
dgf = self.CC.standard_dg_formation(eQ_cpd)
dgf = dgf[0]
return dgf
def get_eQ_reaction_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[PhasedReaction, None]:
"""Get an eQuilibrator reaction object from an r_id.
Parameters
----------
r_id : str
Reaction id to get object for.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
Database to look for reaction in.
Returns
-------
PhasedReaction
eQuilibrator reactiono to calculate ∆Gr with.
"""
if pickaxe:
if r_id in pickaxe.reactions:
reaction_info = pickaxe.reactions[r_id]
else:
return None
elif db_name:
mine = self.client[db_name]
reaction_info = mine.reactions.find_one({"_id": r_id})
if not reaction_info:
return None
else:
return None
reactants = reaction_info["Reactants"]
products = reaction_info["Products"]
lhs = " + ".join(f"{r[0]} {r[1]}" for r in reactants)
rhs = " + ".join(f"{p[0]} {p[1]}" for p in products)
reaction_string = " => ".join([lhs, rhs])
compounds = set([r[1] for r in reactants])
compounds.update(tuple(p[1] for p in products))
eQ_compound_dict = {
c_id: self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
for c_id in compounds
}
if not all(eQ_compound_dict.values()):
return None
if "X73bc8ef21db580aefe4dbc0af17d4013961d9d17" not in compounds:
eQ_compound_dict["water"] = self._water
eq_reaction = Reaction.parse_formula(eQ_compound_dict.get, reaction_string)
return eq_reaction
def physiological_dg_prime_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
"""Calculate the ∆Gm' of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
Returns
-------
pint.Measurement
The calculated ∆G'm.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
dGm_prime = self.CC.physiological_dg_prime(eQ_reaction)
return dGm_prime
def standard_dg_prime_from_rid(
self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
"""Calculate the ∆G'o of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
Returns
-------
pint.Measurement
The calculated ∆G'o.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
dG0_prime = self.CC.standard_dg_prime(eQ_reaction)
return dG0_prime
def dg_prime_from_rid(
self,
r_id: str,
pickaxe: Pickaxe = None,
db_name: str = None,
p_h: Q_ = default_physiological_p_h,
p_mg: Q_ = default_physiological_p_mg,
ionic_strength: Q_ = default_physiological_ionic_strength,
) -> Union[pint.Measurement, None]:
"""Calculate the ∆G' of a reaction.
Parameters
----------
r_id : str
ID of the reaction to calculate.
pickaxe : Pickaxe
pickaxe object to look for the compound in, by default None.
db_name : str
MINE the reaction is found in.
p_h : Q_
pH of system.
p_mg: Q_
pMg of the system.
ionic_strength: Q_
ionic strength of the system.
Returns
-------
pint.Measurement
The calculated ∆G'.
"""
eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
if not eQ_reaction:
return None
self.CC.p_h = p_h
self.CC.p_mg = p_mg
self.CC.ionic_strength = ionic_strength
dG_prime = self.CC.dg_prime(eQ_reaction)
self._reset_CC()
return dG_prime
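if __name__ == "__main__":
    # Usage sketch (illustrative): the sqlite cache path, MINE name and
    # reaction id below are placeholders, not values shipped with this module.
    thermo = Thermodynamics()
    thermo.load_thermo_from_sqlite("compounds.sqlite")
    thermo.load_mongo("mongodb://localhost:27017")
    dgm = thermo.physiological_dg_prime_from_rid(
        "R_example_reaction_id", db_name="example_mine"
    )
    print(dgm)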
| JamesJeffryes/MINE-Database | minedatabase/thermodynamics.py | Python | mit | 11,041 |
import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
    pendflag = year_choice + class_choice + 40 + 150000 #Special Arrangement for Mechanical ;)
else:
    pendflag = year_choice + class_choice + 15 + 150000 #For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
| Yadnyawalkya/hackRIT | hackRIT.py | Python | mit | 3,140 |
import codecs
unicode_string = "Hello Python 3 String"
bytes_object = b"Hello Python 3 Bytes"
print(unicode_string, type(unicode_string))
print(bytes_object, type(bytes_object))
#decode to unicode_string
ux = str(object=bytes_object, encoding="utf-8", errors="strict")
print(ux, type(ux))
ux = bytes_object.decode(encoding="utf-8", errors="strict")
print(ux, type(ux))
hex_bytes = codecs.encode(b"Binary Object", "hex_codec")
def string_to_bytes( text ):
return bin(int.from_bytes(text.encode(), 'big'))
def bytes_to_string( btext ):
#btext = int('0b110100001100101011011000110110001101111', 2)
return btext.to_bytes((btext.bit_length() + 7) // 8, 'big').decode()
def char_to_bytes(char):
return bin(ord(char))
def encodes(text):
bext = text.encode(encoding="utf-8")
enc_bext = codecs.encode(bext, "hex_codec")
return enc_bext.decode("utf-8")
def decodes(text):
    # assumed inverse of encodes(): hex-decode the text, then utf-8 decode the bytes
    return codecs.decode(text.encode("utf-8"), "hex_codec").decode("utf-8")
if __name__ == "__main__":
print( encodes("walla") )
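    # Round-trip sketch: decodes() should invert encodes() for plain utf-8 text.
    print( decodes(encodes("walla")) )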
| thedemz/python-gems | bitten.py | Python | mit | 978 |
"""Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis
from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
"""Class to perform random over-sampling.
Object to over-sample the minority class(es) by picking samples at random
with replacement. The bootstrap can be generated in a smoothed manner.
Read more in the :ref:`User Guide <random_over_sampler>`.
Parameters
----------
{sampling_strategy}
{random_state}
shrinkage : float or dict, default=None
        Parameter controlling the shrinkage applied to the covariance matrix
        when a smoothed bootstrap is generated. The options are:
- if `None`, a normal bootstrap will be generated without perturbation.
It is equivalent to `shrinkage=0` as well;
- if a `float` is given, the shrinkage factor will be used for all
classes to generate the smoothed bootstrap;
        - if a `dict` is given, the shrinkage factor will be specific for each
          class. The key corresponds to the targeted class and the value is
the shrinkage factor.
        The value of the shrinkage parameter needs to be higher than or equal
        to 0.
.. versionadded:: 0.8
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
        correspond to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
shrinkage_ : dict or None
The per-class shrinkage factor used to generate the smoothed bootstrap
sample. When `shrinkage=None` a normal bootstrap will be generated.
.. versionadded:: 0.8
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SMOTEN : Over-sample using the SMOTE variant specifically for categorical
features only.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
Supports multi-class resampling by sampling each class independently.
Supports heterogeneous data as object array containing string and numeric
data.
When generating a smoothed bootstrap, this method is also known as Random
Over-Sampling Examples (ROSE) [1]_.
.. warning::
Since smoothed bootstrap are generated by adding a small perturbation
to the drawn samples, this method is not adequate when working with
sparse matrices.
References
----------
.. [1] G Menardi, N. Torelli, "Training and assessing classification
rules with imbalanced data," Data Mining and Knowledge
Discovery, 28(1), pp.92-122, 2014.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> ros = RandomOverSampler(random_state=42)
>>> X_res, y_res = ros.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
shrinkage=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.shrinkage = shrinkage
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
accept_sparse=["csr", "csc"],
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
if isinstance(self.shrinkage, Real):
self.shrinkage_ = {
klass: self.shrinkage for klass in self.sampling_strategy_
}
elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
self.shrinkage_ = self.shrinkage
else:
raise ValueError(
f"`shrinkage` should either be a positive floating number or "
f"a dictionary mapping a class to a positive floating number. "
f"Got {repr(self.shrinkage)} instead."
)
if self.shrinkage_ is not None:
missing_shrinkage_keys = (
self.sampling_strategy_.keys() - self.shrinkage_.keys()
)
if missing_shrinkage_keys:
raise ValueError(
f"`shrinkage` should contain a shrinkage factor for "
f"each class that will be resampled. The missing "
f"classes are: {repr(missing_shrinkage_keys)}"
)
for klass, shrink_factor in self.shrinkage_.items():
if shrink_factor < 0:
raise ValueError(
f"The shrinkage factor needs to be >= 0. "
f"Got {shrink_factor} for class {klass}."
)
# smoothed bootstrap imposes to make numerical operation; we need
# to be sure to have only numerical data in X
try:
X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
except ValueError as exc:
raise ValueError(
"When shrinkage is not None, X needs to contain only "
"numerical data to later generate a smoothed bootstrap "
"sample."
) from exc
X_resampled = [X.copy()]
y_resampled = [y.copy()]
sample_indices = range(X.shape[0])
for class_sample, num_samples in self.sampling_strategy_.items():
target_class_indices = np.flatnonzero(y == class_sample)
bootstrap_indices = random_state.choice(
target_class_indices,
size=num_samples,
replace=True,
)
sample_indices = np.append(sample_indices, bootstrap_indices)
if self.shrinkage_ is not None:
# generate a smoothed bootstrap with a perturbation
n_samples, n_features = X.shape
smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
1 / (n_features + 4)
)
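                # (Added note) This smoothing constant appears to follow
                # Silverman's rule-of-thumb bandwidth, as in the ROSE method
                # referenced above; the per-class shrinkage factor then scales
                # it along with the per-feature standard deviation below.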
if sparse.issparse(X):
_, X_class_variance = mean_variance_axis(
X[target_class_indices, :],
axis=0,
)
X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
else:
X_class_scale = np.std(X[target_class_indices, :], axis=0)
smoothing_matrix = np.diagflat(
self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
)
X_new = random_state.randn(num_samples, n_features)
X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
if sparse.issparse(X):
X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
X_resampled.append(X_new)
else:
# generate a bootstrap
X_resampled.append(_safe_indexing(X, bootstrap_indices))
y_resampled.append(_safe_indexing(y, bootstrap_indices))
self.sample_indices_ = np.array(sample_indices)
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled, format=X.format)
else:
X_resampled = np.vstack(X_resampled)
y_resampled = np.hstack(y_resampled)
return X_resampled, y_resampled
def _more_tags(self):
return {
"X_types": ["2darray", "string", "sparse", "dataframe"],
"sample_indices": True,
"allow_nan": True,
}
| scikit-learn-contrib/imbalanced-learn | imblearn/over_sampling/_random_over_sampler.py | Python | mit | 9,497 |
# Source Generated with Decompyle++
# File: session_recording.pyc (Python 2.5)
from __future__ import absolute_import
from pushbase.session_recording_component import FixedLengthSessionRecordingComponent
class SessionRecordingComponent(FixedLengthSessionRecordingComponent):
def __init__(self, *a, **k):
        super(SessionRecordingComponent, self).__init__(*a, **k)
self.set_trigger_recording_on_release(not (self._record_button.is_pressed))
def set_trigger_recording_on_release(self, trigger_recording):
self._should_trigger_recording = trigger_recording
def _on_record_button_pressed(self):
pass
def _on_record_button_released(self):
if self._should_trigger_recording:
self._trigger_recording()
self._should_trigger_recording = True
| phatblat/AbletonLiveMIDIRemoteScripts | Push2/session_recording.py | Python | mit | 842 |
# Generated by Django 2.1 on 2018-08-26 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_filefields_example', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='model_filefields_example.BookCover/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='index',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='pages',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookPages/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='sounddevice',
name='instruction_manual',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype'),
),
]
| victor-o-silva/db_file_storage | demo_and_tests/model_filefields_example/migrations/0002_auto_20180826_0054.py | Python | mit | 1,197 |
""" -*- coding: utf-8 -*- """
from python2awscli import bin_aws
from python2awscli.error import AWSNotFound, ParseError, AWSDuplicate
from python2awscli import must
class BaseSecurityGroup(object):
def __init__(self, name, region, vpc, description, inbound=None, outbound=None):
"""
:param name: String, name of SG
:param region: String, AWS region
:param vpc: String, IP of the VPC this SG belongs to
:param description: String
:param inbound: List of dicts, IP Permissions that should exist
:param outbound: List of dicts, IP Permissions that should exist
"""
self.id = None
self.name = name
self.region = region
self.vpc = vpc
self.description = description
self.IpPermissions = []
self.IpPermissionsEgress = []
self.owner = None
self.changed = False
try:
self._get()
except AWSNotFound:
self._create()
self._merge_rules(must.be_list(inbound), self.IpPermissions)
self._merge_rules(must.be_list(outbound), self.IpPermissionsEgress, egress=True)
if self.changed:
self._get()
def _break_out(self, existing):
"""
Undo AWS's rule flattening so we can do simple 'if rule in existing' logic later.
:param existing: List of SG rules as dicts.
:return: List of SG rules as dicts.
"""
spool = list()
for rule in existing:
for ip in rule['IpRanges']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = [ip]
copy_of_rule['UserIdGroupPairs'] = []
spool.append(copy_of_rule)
for group in rule['UserIdGroupPairs']:
copy_of_rule = rule.copy()
copy_of_rule['IpRanges'] = []
copy_of_rule['UserIdGroupPairs'] = [group]
spool.append(copy_of_rule)
return spool
def _merge_rules(self, requested, active, egress=False):
"""
:param requested: List of dicts, IP Permissions that should exist
:param active: List of dicts, IP Permissions that already exist
:param egress: Bool, addressing outbound rules or not?
:return: Bool
"""
if not isinstance(requested, list):
raise ParseError(
'SecurityGroup {0}, need a list of dicts, instead got "{1}"'.format(self.name, requested))
for rule in requested:
if rule not in active:
self._add_rule(rule, egress)
for active_rule in active:
if active_rule not in requested:
self._rm_rule(active_rule, egress)
return True
def _add_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'authorize-security-group-ingress'
if egress:
direction = 'authorize-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Authorized: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _rm_rule(self, ip_permissions, egress):
"""
:param ip_permissions: Dict of IP Permissions
:param egress: Bool
:return: Bool
"""
direction = 'revoke-security-group-ingress'
if egress:
direction = 'revoke-security-group-egress'
command = ['ec2', direction,
'--region', self.region,
'--group-id', self.id,
'--ip-permissions', str(ip_permissions).replace("'", '"')
]
bin_aws(command)
print('Revoked: {0}'.format(ip_permissions)) # TODO: Log(...)
self.changed = True
return True
def _create(self):
"""
Create a Security Group
:return:
"""
# AWS grants all new SGs this default outbound rule "This is pro-human & anti-machine behavior."
default_egress = {
'Ipv6Ranges': [],
'PrefixListIds': [],
'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
'UserIdGroupPairs': [], 'IpProtocol': '-1'
}
command = [
'ec2', 'create-security-group',
'--region', self.region,
'--group-name', self.name,
'--description', self.description,
'--vpc-id', self.vpc
]
try:
self.id = bin_aws(command, key='GroupId')
except AWSDuplicate:
return False # OK if it already exists.
print('Created {0}'.format(command)) # TODO: Log(...)
self.IpPermissions = []
self.IpPermissionsEgress = [default_egress]
self.changed = True
return True
def _get(self):
"""
Get information about Security Group from AWS and update self
:return: Bool
"""
command = ['ec2', 'describe-security-groups', '--region', self.region, '--group-names', self.name]
result = bin_aws(command, key='SecurityGroups', max=1) # will raise NotFound if empty
me = result[0]
self.id = me['GroupId']
self.owner = me['OwnerId']
self.IpPermissions = self._break_out(me['IpPermissions'])
self.IpPermissionsEgress = self._break_out(me['IpPermissionsEgress'])
print('Got {0}'.format(command)) # TODO: Log(...)
return True
def _delete(self):
"""
Delete myself by my own id.
As of 20170114 no other methods call me. You must do `foo._delete()`
:return:
"""
command = ['ec2', 'delete-security-group', '--region', self.region,
# '--dry-run',
'--group-id', self.id
]
bin_aws(command, decode_output=False)
print('Deleted {0}'.format(command)) # TODO: Log(...)
return True
| jhazelwo/python-awscli | python2awscli/model/securitygroup.py | Python | mit | 6,235 |
# -*- coding: utf-8 -*-
"""urls.py: messages extends"""
from django.conf.urls import url
from messages_extends.views import message_mark_all_read, message_mark_read
urlpatterns = [
url(r'^mark_read/(?P<message_id>\d+)/$', message_mark_read, name='message_mark_read'),
url(r'^mark_read/all/$', message_mark_all_read, name='message_mark_all_read'),
]
| AliLozano/django-messages-extends | messages_extends/urls.py | Python | mit | 358 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Frederic Guillot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from cliff import app
from cliff import commandmanager
from pbr import version as app_version
import sys
from kanboard_cli.commands import application
from kanboard_cli.commands import project
from kanboard_cli.commands import task
from kanboard_cli import client
class KanboardShell(app.App):
def __init__(self):
super(KanboardShell, self).__init__(
description='Kanboard Command Line Client',
version=app_version.VersionInfo('kanboard_cli').version_string(),
command_manager=commandmanager.CommandManager('kanboard.cli'),
deferred_help=True)
self.client = None
self.is_super_user = True
def build_option_parser(self, description, version, argparse_kwargs=None):
parser = super(KanboardShell, self).build_option_parser(
description, version, argparse_kwargs=argparse_kwargs)
parser.add_argument(
'--url',
metavar='<api url>',
help='Kanboard API URL',
)
parser.add_argument(
'--username',
metavar='<api username>',
help='API username',
)
parser.add_argument(
'--password',
metavar='<api password>',
help='API password/token',
)
parser.add_argument(
'--auth-header',
metavar='<authentication header>',
help='API authentication header',
)
return parser
def initialize_app(self, argv):
client_manager = client.ClientManager(self.options)
self.client = client_manager.get_client()
self.is_super_user = client_manager.is_super_user()
self.command_manager.add_command('app version', application.ShowVersion)
self.command_manager.add_command('app timezone', application.ShowTimezone)
self.command_manager.add_command('project show', project.ShowProject)
self.command_manager.add_command('project list', project.ListProjects)
self.command_manager.add_command('task create', task.CreateTask)
self.command_manager.add_command('task list', task.ListTasks)
def main(argv=sys.argv[1:]):
return KanboardShell().run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| kanboard/kanboard-cli | kanboard_cli/shell.py | Python | mit | 3,401 |
default_app_config = "gallery.apps.GalleryConfig"
| cdriehuys/chmvh-website | chmvh_website/gallery/__init__.py | Python | mit | 50 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import tempfile
from winsys._compat import unittest
import uuid
import win32file
from winsys.tests.test_fs import utils
from winsys import fs
class TestFS (unittest.TestCase):
filenames = ["%d" % i for i in range (5)]
def setUp (self):
utils.mktemp ()
for filename in self.filenames:
with open (os.path.join (utils.TEST_ROOT, filename), "w"):
pass
def tearDown (self):
utils.rmtemp ()
def test_glob (self):
import glob
pattern = os.path.join (utils.TEST_ROOT, "*")
self.assertEquals (list (fs.glob (pattern)), glob.glob (pattern))
def test_listdir (self):
import os
fs_version = list (fs.listdir (utils.TEST_ROOT))
os_version = os.listdir (utils.TEST_ROOT)
self.assertEquals (fs_version, os_version, "%s differs from %s" % (fs_version, os_version))
#
# All the other module-level functions are hand-offs
# to the corresponding Entry methods.
#
if __name__ == "__main__":
unittest.main ()
if sys.stdout.isatty (): raw_input ("Press enter...")
| operepo/ope | laptop_credential/winsys/tests/test_fs/test_fs.py | Python | mit | 1,100 |
import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
| fierval/KaggleMalware | Learning/1dlbp_tests.py | Python | mit | 4,911 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "corponovo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| hhalmeida/corponovo | manage.py | Python | mit | 807 |
import time
t1=.3
t2=.1
path="~/Dropbox/Ingenieria/asignaturas_actuales"
time.sleep(t2)
keyboard.send_key("<f6>")
time.sleep(t2)
keyboard.send_keys(path)
time.sleep(t1)
keyboard.send_key("<enter>")
| andresgomezvidal/autokey_scripts | data/General/file manager/asignaturas_actuales.py | Python | mit | 200 |
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
_address_map = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:configs OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:endpoint_type str: The endpoint type to resolve.
:returns str: Base URL for services on the current service unit.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
resolved_address = None
if is_clustered():
if config(_address_map[endpoint_type]['config']) is None:
# Assume vip is simple and pass back directly
resolved_address = config('vip')
else:
for vip in config('vip').split():
if is_address_in_network(
config(_address_map[endpoint_type]['config']),
vip):
resolved_address = vip
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr()
else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network(
config(_address_map[endpoint_type]['config']), fallback_addr)
if resolved_address is None:
raise ValueError('Unable to resolve a suitable IP address'
' based on charm state and configuration')
else:
return resolved_address
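# Illustrative example (added; all values are hypothetical): on a non-clustered
# unit with config('os-public-network') == '10.0.0.0/24', resolve_address(PUBLIC)
# returns the unit address that falls inside that network (falling back to
# unit_get('public-address')), and canonical_url() would then produce e.g.
# 'https://10.0.0.5' once the https context is complete.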
| jiasir/openstack-trove | lib/charmhelpers/contrib/openstack/ip.py | Python | mit | 2,332 |
import collections
import re
import urlparse
class DSN(collections.MutableMapping):
''' Hold the results of a parsed dsn.
This is very similar to urlparse.ParseResult tuple.
http://docs.python.org/2/library/urlparse.html#results-of-urlparse-and-urlsplit
It exposes the following attributes:
scheme
schemes -- if your scheme has +'s in it, then this will contain a list of schemes split by +
path
paths -- the path segment split by /, so "/foo/bar" would be ["foo", "bar"]
host -- same as hostname (I just like host better)
hostname
hostloc -- host:port
username
password
netloc
query -- a dict of the query string
query_str -- the raw query string
port
fragment
'''
DSN_REGEXP = re.compile(r'^\S+://\S+')
FIELDS = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
def __init__(self, dsn, **defaults):
''' Parse a dsn to parts similar to urlparse.
This is a nuts function that can serve as a good basis to parsing a custom dsn
:param dsn: the dsn to parse
:type dsn: str
:param defaults: any values you want to have defaults for if they aren't in the dsn
:type defaults: dict
'''
assert self.DSN_REGEXP.match(dsn), \
"{} is invalid, only full dsn urls (scheme://host...) allowed".format(dsn)
first_colon = dsn.find(':')
scheme = dsn[0:first_colon]
dsn_url = dsn[first_colon+1:]
url = urlparse.urlparse(dsn_url)
options = {}
if url.query:
for k, kv in urlparse.parse_qs(url.query, True, True).iteritems():
if len(kv) > 1:
options[k] = kv
else:
options[k] = kv[0]
self.scheme = scheme
self.hostname = url.hostname
self.path = url.path
self.params = url.params
self.query = options
self.fragment = url.fragment
self.username = url.username
self.password = url.password
self.port = url.port
self.query_str = url.query
for k, v in defaults.iteritems():
self.set_default(k, v)
def __iter__(self):
for f in self.FIELDS:
yield getattr(self, f, '')
def __len__(self):
        return len(self.FIELDS)
def __getitem__(self, field):
return getattr(self, field, None)
def __setitem__(self, field, value):
setattr(self, field, value)
def __delitem__(self, field):
delattr(self, field)
@property
def schemes(self):
'''the scheme, split by plus signs'''
return self.scheme.split('+')
@property
def netloc(self):
'''return username:password@hostname:port'''
s = ''
prefix = ''
if self.username:
s += self.username
prefix = '@'
if self.password:
s += ":{}".format(self.password)
prefix = '@'
s += "{}{}".format(prefix, self.hostloc)
return s
@property
def paths(self):
'''the path attribute split by /'''
return filter(None, self.path.split('/'))
@property
def host(self):
'''the hostname, but I like host better'''
return self.hostname
@property
def hostloc(self):
'''return host:port'''
hostloc = self.hostname
if self.port:
hostloc = '{}:{}'.format(hostloc, self.port)
return hostloc
def set_default(self, key, value):
''' Set a default value for key.
This is different than dict's setdefault because it will set default either
if the key doesn't exist, or if the value at the key evaluates to False, so
an empty string or a None will value will be updated.
:param key: the item to update
:type key: str
:param value: the items new value if key has a current value that evaluates to False
'''
if not getattr(self, key, None):
setattr(self, key, value)
def get_url(self):
'''return the dsn back into url form'''
return urlparse.urlunparse((
self.scheme,
self.netloc,
self.path,
self.params,
self.query_str,
self.fragment,
))
def copy(self):
return DSN(self.get_url())
def __str__(self):
return self.get_url()
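# Hedged usage sketch (added; not part of the original module). The DSN below
# is made up purely to illustrate the attributes documented in the class
# docstring above.
if __name__ == '__main__':
    dsn = DSN('mysql+pymysql://user:secret@db.example.com:3306/app?charset=utf8')
    print(dsn.schemes)   # -> ['mysql', 'pymysql']
    print(dsn.hostloc)   # -> 'db.example.com:3306'
    print(dsn.paths)     # -> ['app']
    print(dsn.query)     # -> {'charset': 'utf8'}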
| mylokin/servy | servy/utils/dsntool.py | Python | mit | 4,496 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 12:49:36 2017
@author: drsmith
"""
import os
from .globals import FdpError
def canonicalMachineName(machine=''):
aliases = {'nstxu': ['nstx', 'nstxu', 'nstx-u'],
'diiid': ['diiid', 'diii-d', 'd3d'],
'cmod': ['cmod', 'c-mod']}
for key, value in aliases.items():
if machine.lower() in value:
return key
# invalid machine name
raise FdpError('"{}" is not a valid machine name\n'.format(machine))
MDS_SERVERS = {
'nstxu': {'hostname': 'skylark.pppl.gov',
'port': '8000'},
'diiid': {'hostname': 'atlas.gat.com',
'port': '8000'}
}
EVENT_SERVERS = {
'nstxu': {'hostname': 'skylark.pppl.gov',
'port': '8000'},
'diiid': {'hostname': 'atlas.gat.com',
'port': '8000'},
'ltx': {'hostname': 'lithos.pppl.gov',
'port': '8000'}
}
LOGBOOK_CREDENTIALS = {
'nstxu': {'server': 'sql2008.pppl.gov',
'instance': None,
'username': None,
'password': None,
'database': None,
'port': '62917',
'table': 'entries',
'loginfile': os.path.join(os.getenv('HOME'),
'nstxlogs.sybase_login')
}
}
| Fusion-Data-Platform/fdp | fdp/lib/datasources.py | Python | mit | 1,353 |
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.licenseinfo import LicenseInfo
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_licenseinfo
class Model_LicenseInfo(Model):
    ## Class providing functionality for searching license type information.
## @private
# @return {str}
def _api_path(self):
return "/product/license"
## @private
# @return {str}
def _root_key(self):
return "LicenseInfo"
## @private
# @return {str}
def _root_key_m(self):
return "LicenseInfo"
## @private
# @return {str}
def _class_name(self):
return "LicenseInfo"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return LicenseInfo(self._client, obj, wrapped)
    ## Specifies the starting offset of the list to fetch next.
#
    # @param {int} offset Offset
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
    ## Specifies the maximum number of records in the list to fetch next.
#
    # @param {int} count Maximum number of records
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def limit(self, count):
Util.validate_type(count, "int")
return self._limit(count)
    ## Directly specifies the filtering settings of the Web API.
#
    # @param {str} key Key
    # @param {any} value Value
    # @param {bool} multiple=False Specify true to pass an array as value and search for exact matches combined with OR. Normally, value is a scalar and a fuzzy search is performed.
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
    ## Discards all the state configured for the next request.
#
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
def reset(self):
return self._reset()
    ## Retrieves the single resource with the specified ID.
#
# @param {str} id
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo} Resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
    ## Performs a resource search request and retrieves the results as a list.
#
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo[]} Array of resource objects
def find(self):
return self._find()
    ## Narrows the results down to resources whose name contains the specified string.
    #
    # Matching is case-insensitive.
    # Multiple strings separated by spaces are treated as requiring that all of them be contained in the name.
#
# @todo Implement test case
# @param {str} name
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def with_name_like(self, name):
Util.validate_type(name, "str")
return self._with_name_like(name)
    ## Sorts the results by name.
#
# @todo Implement test case
# @param {bool} reverse=False
# @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
def sort_by_name(self, reverse=False):
Util.validate_type(reverse, "bool")
return self._sort_by_name(reverse)
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_LicenseInfo, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
| sakura-internet/saklient.python | saklient/cloud/models/model_licenseinfo.py | Python | mit | 4,306 |
import os
import logging
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
class Submit(object):
def __init__(self, manager, task_id, sub_ids=None, dry_run=False, resubmit=False):
self.__manager = manager
self.__task = self.__manager.load_task(task_id)
self.__sub_ids = sub_ids
self.__dry_run = dry_run
self.__resubmit = resubmit
self.__logger = logging.getLogger('JSUB')
if self.__sub_ids==None:
self.__sub_ids=range(len(self.__task.data['jobvar']))
self.__initialize_manager()
def __initialize_manager(self):
self.__config_mgr = self.__manager.load_config_manager()
self.__backend_mgr = self.__manager.load_backend_manager()
self.__bootstrap_mgr = self.__manager.load_bootstrap_manager()
self.__navigator_mgr = self.__manager.load_navigator_manager()
self.__context_mgr = self.__manager.load_context_manager()
self.__action_mgr = self.__manager.load_action_manager()
self.__launcher_mgr = self.__manager.load_launcher_manager()
def handle(self):
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
main_root = os.path.join(run_root, 'main')
safe_rmdir(main_root)
safe_mkdir(main_root)
self.__create_input(main_root)
self.__create_context(main_root)
self.__create_action(main_root)
self.__create_navigator(main_root)
self.__create_bootstrap(main_root)
launcher_param = self.__create_launcher(run_root)
self.__submit(launcher_param)
def __create_input(self, main_root):
content = self.__manager.load_content()
input_dir = os.path.join(main_root,'input')
try:
content.get(self.__task.data['id'], 'input', os.path.join(main_root, 'input'))
except:
safe_mkdir(input_dir)
def __create_context(self, main_root):
context_dir = os.path.join(main_root, 'context')
safe_mkdir(context_dir)
action_default = {}
for unit, param in self.__task.data['workflow'].items():
action_default[unit] = self.__action_mgr.default_config(param['type'])
navigators = self.__config_mgr.navigator()
context_format = self.__navigator_mgr.context_format(navigators)
self.__context_mgr.create_context_file(self.__task.data, action_default, context_format, context_dir)
def __create_action(self, main_root):
action_dir = os.path.join(main_root, 'action')
safe_mkdir(action_dir)
actions = set()
for unit, param in self.__task.data['workflow'].items():
actions.add(param['type'])
self.__action_mgr.create_actions(actions, action_dir)
def __create_navigator(self, main_root):
navigator_dir = os.path.join(main_root, 'navigator')
safe_mkdir(navigator_dir)
navigators = self.__config_mgr.navigator()
self.__navigator_mgr.create_navigators(navigators, navigator_dir)
def __create_bootstrap(self, main_root):
bootstrap_dir = os.path.join(main_root, 'bootstrap')
safe_mkdir(bootstrap_dir)
bootstrap = self.__config_mgr.bootstrap()
self.__bootstrap_mgr.create_bootstrap(bootstrap, bootstrap_dir)
def __create_launcher(self, run_root):
launcher = self.__task.data['backend']['launcher']
return self.__launcher_mgr.create_launcher(launcher, run_root)
def __submit(self, launcher_param):
if self.__dry_run:
return
if self.__resubmit==False:
if self.__task.data.get('backend_job_ids') or self.__task.data.get('backend_task_id'):
self.__logger.info('This task has already been submitted to backend, rerun the command with "-r" option if you wish to delete current jobs and resubmit the task.')
return
else:
self.__logger.info('Removing submitted jobs on backend before resubmission.')
task_id = self.__task.data.get('backend_task_id')
#remove previously generated files in job folder
job_ids = self.__task.data.get('backend_job_ids')
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
job_root=os.path.join(run_root,'subjobs')
safe_rmdir(job_root)
if task_id:
self.__backend_mgr.delete_task(self.__task.data['backend'],backend_task_id = task_id)
elif job_ids:
self.__backend_mgr.delete_jobs(self.__task.data['backend'],backend_job_ids = job_ids)
result = self.__backend_mgr.submit(self.__task.data['backend'], self.__task.data['id'], launcher_param, sub_ids = self.__sub_ids)
if not type(result) is dict:
result = {}
if 'backend_job_ids' in result:
njobs = len(result['backend_job_ids'])
else:
njobs = len(result)
if njobs>0:
self.__logger.info('%d jobs successfully submitted to backend.'%(njobs))
self.__task.data.setdefault('backend_job_ids',{})
backend_job_ids=result.get('backend_job_ids',{})
backend_task_id=result.get('backend_task_id',0)
self.__task.data['backend_job_ids'].update(backend_job_ids)
self.__task.data['backend_task_id']=backend_task_id
self.__task.data['status'] = 'Submitted'
task_pool = self.__manager.load_task_pool()
task_pool.save(self.__task)
self.__logger.debug(result)
| jsubpy/jsub | jsub/operation/submit.py | Python | mit | 4,925 |
import os
from typing import List, Tuple
from raiden.network.blockchain_service import BlockChainService
from raiden.network.pathfinding import get_random_service
from raiden.network.proxies.service_registry import ServiceRegistry
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.utils import typing
from raiden.utils.smart_contracts import deploy_contract_web3
from raiden.utils.solc import compile_files_cwd
from raiden_contracts.constants import CONTRACT_HUMAN_STANDARD_TOKEN
from raiden_contracts.contract_manager import ContractManager
def deploy_token(
deploy_client: JSONRPCClient,
contract_manager: ContractManager,
initial_amount: typing.TokenAmount,
decimals: int,
token_name: str,
token_symbol: str,
) -> ContractProxy:
token_address = deploy_contract_web3(
contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
deploy_client=deploy_client,
contract_manager=contract_manager,
constructor_arguments=(initial_amount, decimals, token_name, token_symbol),
)
contract_abi = contract_manager.get_contract_abi(CONTRACT_HUMAN_STANDARD_TOKEN)
return deploy_client.new_contract_proxy(
contract_interface=contract_abi, contract_address=token_address
)
def deploy_tokens_and_fund_accounts(
token_amount: int,
number_of_tokens: int,
deploy_service: BlockChainService,
participants: typing.List[typing.Address],
contract_manager: ContractManager,
) -> typing.List[typing.TokenAddress]:
""" Deploy `number_of_tokens` ERC20 token instances with `token_amount` minted and
distributed among `blockchain_services`. Optionally the instances will be registered with
the raiden registry.
Args:
token_amount (int): number of units that will be created per token
number_of_tokens (int): number of token instances that will be created
deploy_service (BlockChainService): the blockchain connection that will deploy
participants (list(address)): participant addresses that will receive tokens
"""
result = list()
for _ in range(number_of_tokens):
token_address = deploy_contract_web3(
CONTRACT_HUMAN_STANDARD_TOKEN,
deploy_service.client,
contract_manager=contract_manager,
constructor_arguments=(token_amount, 2, "raiden", "Rd"),
)
result.append(token_address)
# only the creator of the token starts with a balance (deploy_service),
# transfer from the creator to the other nodes
for transfer_to in participants:
deploy_service.token(token_address).transfer(
to_address=transfer_to, amount=token_amount // len(participants)
)
return result
def deploy_service_registry_and_set_urls(
private_keys, web3, contract_manager, service_registry_address
) -> Tuple[ServiceRegistry, List[str]]:
urls = ["http://foo", "http://boo", "http://coo"]
c1_client = JSONRPCClient(web3, private_keys[0])
c1_service_proxy = ServiceRegistry(
jsonrpc_client=c1_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
c2_client = JSONRPCClient(web3, private_keys[1])
c2_service_proxy = ServiceRegistry(
jsonrpc_client=c2_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
c3_client = JSONRPCClient(web3, private_keys[2])
c3_service_proxy = ServiceRegistry(
jsonrpc_client=c3_client,
service_registry_address=service_registry_address,
contract_manager=contract_manager,
)
# Test that getting a random service for an empty registry returns None
pfs_address = get_random_service(c1_service_proxy, "latest")
assert pfs_address is None
# Test that setting the urls works
c1_service_proxy.set_url(urls[0])
c2_service_proxy.set_url(urls[1])
c3_service_proxy.set_url(urls[2])
return c1_service_proxy, urls
def get_test_contract(name):
contract_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "smart_contracts", name)
)
contracts = compile_files_cwd([contract_path])
return contract_path, contracts
def deploy_rpc_test_contract(deploy_client, name):
contract_path, contracts = get_test_contract(f"{name}.sol")
contract_proxy, _ = deploy_client.deploy_solidity_contract(
name, contracts, libraries=dict(), constructor_parameters=None, contract_path=contract_path
)
return contract_proxy
def get_list_of_block_numbers(item):
""" Creates a list of block numbers of the given list/single event"""
if isinstance(item, list):
return [element["blockNumber"] for element in item]
if isinstance(item, dict):
block_number = item["blockNumber"]
return [block_number]
return list()
| hackaugusto/raiden | raiden/tests/utils/smartcontracts.py | Python | mit | 4,965 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
from indico.core import signals
from indico.core.db import db
from .logger import logger
from .oauth2 import require_oauth
__all__ = ['require_oauth']
@signals.core.app_created.connect
def _no_ssl_required_on_debug(app, **kwargs):
if app.debug or app.testing:
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
@signals.users.merged.connect
def _delete_merged_user_tokens(target, source, **kwargs):
target_app_links = {link.application: link for link in target.oauth_app_links}
for source_link in source.oauth_app_links.all():
try:
target_link = target_app_links[source_link.application]
except KeyError:
logger.info('merge: reassigning %r to %r', source_link, target)
source_link.user = target
else:
logger.info('merge: merging %r into %r', source_link, target_link)
target_link.update_scopes(set(source_link.scopes))
target_link.tokens.extend(source_link.tokens)
db.session.delete(source_link)
| indico/indico | indico/core/oauth/__init__.py | Python | mit | 1,252 |
import random
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
_largesize = 300
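    # (Added note) For lists longer than _largesize nodes, two checkpoint
    # nodes are cached at roughly 1/3 and 2/3 of the list so getRandom() walks
    # at most about a third of the list from the nearest checkpoint instead of
    # traversing from the head every time.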
def __init__(self, head):
self.head = head
self.lsize = 0
while head.next:
head = head.next
self.lsize += 1
self.m1_idx = None
self.m2_idx = None
if self.lsize > self._largesize:
            self.m1_idx = self.lsize // 3 # start from 1/3
            self.m1 = self._getN(self.m1_idx)
            self.m2_idx = self.m1_idx * 2 # start from 2/3
self.m2 = self._getN(self.m2_idx)
def _getN(self, n):
n -= 1
p = self.head
while n:
p = p.next
n -= 1
return p
def getRandom(self):
def _get(delta, start):
p = start
while delta:
p = p.next
delta -= 1
return p.val
nextpos = random.randint(0, self.lsize)
if not self.m1_idx:
return _get(nextpos, self.head)
if nextpos < self.m1_idx:
val = _get(nextpos, self.head)
elif nextpos < self.m2_idx:
val = _get(nextpos - self.m1_idx, self.m1)
else:
val = _get(nextpos - self.m2_idx, self.m2)
return val
| daicang/Leetcode-solutions | 382-linked-list-random-node.py | Python | mit | 1,372 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from misura.canon import option
from misura.canon.option import get_typed_cols, get_insert_cmd, base_col_def, print_tree
import sqlite3
from misura.canon.tests import testdir
db = testdir + 'storage/tmpdb'
c1 = testdir + 'storage/Conf.csv'
def go(t):
o = option.Option(**{'handle': t, 'type': t})
o.validate()
return o
class SqlStore(unittest.TestCase):
@classmethod
def setUpClass(cls):
if os.path.exists(db):
os.remove(db)
cls.conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
st0 = option.CsvStore(kid='/base/')
st0.merge_file(c1)
st0.validate()
cls.desc = st0.desc
def test_get_typed_cols(self):
print(get_typed_cols(go('Integer')))
print(get_typed_cols(go('String')))
print(get_typed_cols(go('Point')))
print(get_typed_cols(go('Role')))
print(get_typed_cols(go('RoleIO')))
print(get_typed_cols(go('Log')))
print(get_typed_cols(go('Meta')))
def test_get_insert_cmd(self):
print(get_insert_cmd(go('Integer'), base_col_def))
print(get_insert_cmd(go('String'), base_col_def))
print(get_insert_cmd(go('Point'), base_col_def))
print(get_insert_cmd(go('Role'), base_col_def))
print(get_insert_cmd(go('RoleIO'), base_col_def))
print(get_insert_cmd(go('Log'), base_col_def))
print(get_insert_cmd(go('Meta'), base_col_def))
def test_column_definition(self):
s = option.SqlStore()
print(s.column_definition(go('Integer'))[1])
print(s.column_definition(go('String'))[1])
print(s.column_definition(go('Point'))[1])
print(s.column_definition(go('Role'))[1])
print(s.column_definition(go('RoleIO'))[1])
print(s.column_definition(go('Log'))[1])
print(s.column_definition(go('Meta'))[1])
def test_write_desc(self):
s = option.SqlStore()
s.cursor = self.conn.cursor()
s.write_desc(self.desc)
print('READING')
r = s.read_tree()
print(r)
print('print(tree\n', print_tree(r))
print('WRITING AGAIN')
s.write_tree(r)
print("READING AGAIN")
r = s.read_tree()
print(r)
print('print(tree2\n', print_tree(r))
# @unittest.skip('')
def test_tables(self):
st0 = option.CsvStore(kid='ciao')
st0.merge_file(c1)
st = option.SqlStore(kid='ciao')
st.desc = st0.desc
k0 = set(st.desc.keys())
cursor = self.conn.cursor()
st.write_table(cursor, 'conf1')
self.conn.commit()
cursor.execute('select handle from conf1')
r = cursor.fetchall()
k1 = set([eval(k[0]) for k in r])
self.assertEqual(k0, k1)
st2 = option.SqlStore(kid='ciao')
st2.read_table(cursor, 'conf1')
self.assertEqual(st.desc, st2.desc)
if __name__ == "__main__":
unittest.main()
| tainstr/misura.canon | misura/canon/option/tests/test_sqlstore.py | Python | mit | 3,011 |
from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
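            # Worked example (added): with rewards [0, 0, 1] and
            # REWARD_DISCOUNT = 0.99 the loop above yields
            # discounted_r = [0.9801, 0.99, 1.0]; the running sum is reset at
            # every non-zero reward, which is specific to the catch game.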
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
| NiloFreitas/Deep-Reinforcement-Learning | reinforcement/players/player_reinforce_rnn_2.py | Python | mit | 4,361 |
from SBaaS_base.postgresql_orm_base import *
class data_stage01_rnasequencing_analysis(Base):
__tablename__ = 'data_stage01_rnasequencing_analysis'
id = Column(Integer, Sequence('data_stage01_rnasequencing_analysis_id_seq'), primary_key=True)
analysis_id = Column(String(500))
experiment_id = Column(String(50))
sample_name_abbreviation = Column(String(500)) # equivalent to sample_name_abbreviation
sample_name = Column(String(500)) # equivalent to sample_name_abbreviation
time_point = Column(String(10)) # converted to intermediate in lineage analysis
analysis_type = Column(String(100)); # time-course (i.e., multiple time points), paired (i.e., control compared to multiple replicates), group (i.e., single grouping of samples).
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('experiment_id','sample_name_abbreviation','sample_name','time_point','analysis_type','analysis_id'),
)
def __init__(self,
row_dict_I,
):
self.analysis_id=row_dict_I['analysis_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.sample_name=row_dict_I['sample_name'];
self.time_point=row_dict_I['time_point'];
self.analysis_type=row_dict_I['analysis_type'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self,analysis_id_I,
experiment_id_I,
sample_name_abbreviation_I,
sample_name_I,
time_point_I,
analysis_type_I,
used__I,
comment__I):
self.analysis_id=analysis_id_I
self.experiment_id=experiment_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.sample_name=sample_name_I
self.time_point=time_point_I
self.analysis_type=analysis_type_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'analysis_id':self.analysis_id,
'experiment_id':self.experiment_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'sample_name':self.sample_name,
'time_point':self.time_point,
'analysis_type':self.analysis_type,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__()) | dmccloskey/SBaaS_rnasequencing | SBaaS_rnasequencing/stage01_rnasequencing_analysis_postgresql_models.py | Python | mit | 2,579 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
from __future__ import absolute_import
from math import acos, cos, pi, radians, sin, sqrt
import auttitude as at
import numpy as np
def normalized_cross(a, b):
"""
Returns the normalized cross product between vectors.
Uses numpy.cross().
Parameters:
a: First vector.
b: Second vector.
"""
c = np.cross(a, b)
length = sqrt(c.dot(c))
return c/length if length > 0 else c
def general_plane_intersection(n_a, da, n_b, db):
"""
Returns a point and direction vector for the line of intersection
of two planes in space, or None if planes are parallel.
Parameters:
n_a: Normal vector to plane A
da: Point of plane A
n_b: Normal vector to plane B
db: Point of plane B
"""
# https://en.wikipedia.org/wiki/Intersection_curve
n_a = np.array(n_a)
n_b = np.array(n_b)
da = np.array(da)
db = np.array(db)
l_v = np.cross(n_a, n_b)
norm_l = sqrt(np.dot(l_v, l_v))
if norm_l == 0:
return None
else:
l_v /= norm_l
aa = np.dot(n_a, n_a)
bb = np.dot(n_b, n_b)
ab = np.dot(n_a, n_b)
d_ = 1./(aa*bb - ab*ab)
l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b
return l_v, l_0
def small_circle_intersection(axis_a, angle_a, axis_b, angle_b):
"""
Finds the intersection between two small-circles returning zero, one or two
solutions as tuple.
Parameters:
axis_a: Vector defining first circle axis
angle_a: Small circle aperture angle (in radians) around axis_a
axis_b: Vector defining second circle axis
angle_b: Small circle aperture angle (in radians) around axis_b
"""
line = general_plane_intersection(axis_a, cos(angle_a),
axis_b, cos(angle_b))
if line is None:
return ()
l_v, l_0 = line
# https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
b = 2*l_v.dot(l_0)
delta = b*b - 4*(l_0.dot(l_0) - 1)
# Should the answers be normalized?
if delta < 0:
return ()
elif delta == 0:
        return l_0 + l_v*(-b/2.),
else:
sqrt_delta = sqrt(delta)
return l_0 + l_v*(-b - sqrt_delta)/2., l_0 + l_v*(-b + sqrt_delta)/2.
def build_rotation_matrix(azim, plng, rake):
"""
Returns the rotation matrix that rotates the North vector to the line given
by Azimuth and Plunge and East and Up vectors are rotate clock-wise by Rake
around the rotated North vector.
Parameters:
azim: Line Azimuth from North (degrees).
plng: Line Plunge measured from horizontal (degrees).
rake: Rotation angle around rotated axis (degrees).
"""
# pylint: disable=bad-whitespace
azim, plng, rake = radians(azim), radians(plng), radians(rake)
R1 = np.array((( cos(rake), 0., sin(rake)),
( 0., 1., 0. ),
(-sin(rake), 0., cos(rake))))
R2 = np.array((( 1., 0., 0. ),
( 0., cos(plng), sin(plng)),
( 0., -sin(plng), cos(plng))))
R3 = np.array((( cos(azim), sin(azim), 0. ),
(-sin(azim), cos(azim), 0. ),
( 0., 0., 1. )))
return R3.dot(R2).dot(R1)
def adjust_lines_to_planes(lines, planes):
"""
    Project each given line to its respective plane. Returns the projected
lines as a new LineSet and the angle (in radians) between each line and
plane prior to projection.
Parameters:
lines: A LineSet like object with an array of n Lines
planes: A PlaseSet like object with an array of n Planes
"""
lines = at.LineSet(lines)
planes = at.PlaneSet(planes)
angles = np.zeros(len(lines))
adjusted_lines = np.zeros_like(lines)
for i, (line, plane) in enumerate(zip(lines, planes)):
cos_theta = np.dot(line, plane)
angles[i] = pi/2. - acos(cos_theta)
adjusted_line = line - line*cos_theta
adjusted_lines[i] = adjusted_line/sqrt(np.dot(adjusted_line,
adjusted_line))
return adjusted_lines, angles
| endarthur/autti | auttitude/math.py | Python | mit | 4,348 |
from django.contrib import admin
# Register your models here.
from rcps.models import *
class IngredientToRecipeInline(admin.TabularInline):
model = Ingredient.recipes.through
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
class EquipmentInline(admin.TabularInline):
model = Equipment.equipment_recipes.through
verbose_name = 'Инструмент'
verbose_name_plural = 'Инструменты'
class TagInline(admin.TabularInline):
model = Tag.tag_recipes.through
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
class RecipeAdmin(admin.ModelAdmin):
model = Recipe
fields = ['recipe_name', 'recipe_link']
inlines = (
IngredientToRecipeInline,
EquipmentInline,
TagInline,
)
class IngredientComponentInAlternativeInline(admin.TabularInline):
model = IngredientAlternative.ingredients.through
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
class IngredientAlternativeAdmin(admin.ModelAdmin):
model = IngredientAlternative
inlines = (
IngredientComponentInAlternativeInline,
)
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient)
admin.site.register(IngredientAlternative, IngredientAlternativeAdmin)
admin.site.register(IngredientCategory)
admin.site.register(Equipment)
admin.site.register(EquipmentCategory)
admin.site.register(IngredientReplacement)
admin.site.register(Tag)
 | ADKosm/Recipes | Recipes/rcps/admin.py | Python | mit | 1,503 |
import os
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = "super_secret_key"
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
class ProductionConfig(Config):
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
| jiangtyd/crewviewer | project/config.py | Python | mit | 404 |
r"""
Create MapServer class diagrams
Requires https://graphviz.gitlab.io/_pages/Download/Download_windows.html
https://stackoverflow.com/questions/1494492/graphviz-how-to-go-from-dot-to-a-graph
For DOT languge see http://www.graphviz.org/doc/info/attrs.html
cd C:\Program Files (x86)\Graphviz2.38\bin
dot -Tpng D:\GitHub\mappyfile\mapfile_classes.dot -o outfile.png
outfile.png
For Entity Relationship diagrams:
https://graphviz.readthedocs.io/en/stable/examples.html#er-py
"""
import os
import pydot
# import pprint
FONT = "Lucida Sans"
def graphviz_setup(gviz_path):
os.environ['PATH'] = gviz_path + ";" + os.environ['PATH']
def add_child(graph, child_id, child_label, parent_id, colour):
"""
http://www.graphviz.org/doc/info/shapes.html#polygon
"""
node = pydot.Node(child_id, style="filled", fillcolor=colour, label=child_label, shape="polygon", fontname=FONT)
graph.add_node(node)
graph.add_edge(pydot.Edge(parent_id, node))
def add_children(graph, parent_id, d, level=0):
blue = "#6b6bd1"
white = "#fdfefd"
green = "#33a333"
colours = [blue, white, green] * 3
for class_, children in d.items():
colour = colours[level]
child_label = class_
child_id = parent_id + "_" + class_
add_child(graph, child_id, child_label, parent_id, colour)
add_children(graph, child_id, children, level+1)
def save_file(graph, fn):
filename = "%s.png" % fn
graph.write_png(filename)
graph.write("%s.dot" % fn)
os.startfile(filename)
def main(gviz_path, layer_only=False):
graphviz_setup(gviz_path)
graph = pydot.Dot(graph_type='digraph', rankdir="TB")
layer_children = {
'CLASS': {
'LABEL': {'STYLE': {}},
'CONNECTIONOPTIONS': {},
'LEADER': {'STYLE': {}},
'STYLE': {},
'VALIDATION': {}
},
'CLUSTER': {},
'COMPOSITE': {},
'FEATURE': {'POINTS': {}},
'GRID': {},
'JOIN': {},
'METADATA': {},
'PROJECTION': {},
'SCALETOKEN': {'VALUES': {}},
'VALIDATION': {}
}
# pprint.pprint(layer_children)
classes = {
"MAP": {
"LAYER": layer_children,
'LEGEND': {'LABEL': {}},
'PROJECTION': {},
'QUERYMAP': {},
'REFERENCE': {},
'SCALEBAR': {'LABEL': {}},
'SYMBOL': {},
'WEB': {'METADATA': {}, 'VALIDATION': {}}
}
}
if layer_only:
root = "LAYER"
classes = classes["MAP"]
fn = "layer_classes"
else:
fn = "map_classes"
root, = classes.keys()
node = pydot.Node(root, style="filled", fillcolor="#33a333", label=root, fontname=FONT, shape="polygon")
graph.add_node(node)
add_children(graph, root, classes[root])
save_file(graph, fn)
if __name__ == "__main__":
gviz_path = r"C:\Program Files (x86)\Graphviz2.38\bin"
main(gviz_path, True)
main(gviz_path, False)
print("Done!")
| geographika/mappyfile | docs/scripts/class_diagrams.py | Python | mit | 3,102 |
# Author: John Elkins <[email protected]>
# License: MIT <LICENSE>
from common import *
if len(sys.argv) < 2:
log('ERROR output directory is required')
time.sleep(3)
exit()
# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# log in and load personal library
api = open_api()
library = load_personal_library()
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
# skip empty and no-name playlists
if not playlist_name: return
if len(playlist_tracks) == 0: return
# setup output files
playlist_name = playlist_name.replace('/', '')
open_log(os.path.join(output_dir,playlist_name+u'.log'))
outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
encoding='utf-8',mode='w')
# keep track of stats
stats = create_stats()
export_skipped = 0
# keep track of songids incase we need to skip duplicates
song_ids = []
log('')
log('============================================================')
log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
+playlist_name)
log('============================================================')
# add the playlist description as a "comment"
if playlist_description:
outfile.write(tsep)
outfile.write(playlist_description)
outfile.write(os.linesep)
for tnum, pl_track in enumerate(playlist_tracks):
track = pl_track.get('track')
# we need to look up these track in the library
if not track:
library_track = [
item for item in library if item.get('id')
in pl_track.get('trackId')]
if len(library_track) == 0:
log(u'!! '+str(tnum+1)+repr(pl_track))
export_skipped += 1
continue
track = library_track[0]
result_details = create_result_details(track)
if not allow_duplicates and result_details['songid'] in song_ids:
log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
export_skipped += 1
continue
# update the stats
update_stats(track,stats)
# export the track
song_ids.append(result_details['songid'])
outfile.write(create_details_string(result_details))
outfile.write(os.linesep)
# calculate the stats
stats_results = calculate_stats_results(stats,len(playlist_tracks))
# output the stats to the log
log('')
log_stats(stats_results)
log(u'export skipped: '+unicode(export_skipped))
# close the files
close_log()
outfile.close()
# the personal library is used so we can lookup tracks that fail to return
# info from the ...playlist_contents() call
playlist_contents = api.get_all_user_playlist_contents()
for playlist in playlist_contents:
playlist_name = playlist.get('name')
playlist_description = playlist.get('description')
playlist_tracks = playlist.get('tracks')
playlist_handler(playlist_name, playlist_description, playlist_tracks)
if export_thumbs_up:
# get thumbs up playlist
thumbs_up_tracks = []
for track in library:
if track.get('rating') is not None and int(track.get('rating')) > 1:
thumbs_up_tracks.append(track)
# modify format of each dictionary to match the data type
# of the other playlists
thumbs_up_tracks_formatted = []
for t in thumbs_up_tracks:
thumbs_up_tracks_formatted.append({'track': t})
playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)
if export_all:
all_tracks_formatted = []
for t in library:
all_tracks_formatted.append({'track': t})
playlist_handler('All', 'All tracks', all_tracks_formatted)
close_api()
| soulfx/gmusic-playlist | ExportLists.py | Python | mit | 3,890 |
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showexponent",
parent_name="scatterpolar.marker.colorbar",
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatterpolar/marker/colorbar/_showexponent.py | Python | mit | 554 |
__author__ = 'miko'
from Tkinter import Frame
class GameState(Frame):
def __init__(self, *args, **kwargs):
self.stateName = kwargs["stateName"]
self.root = args[0]
self.id = kwargs["id"]
Frame.__init__(self, self.root.mainWindow)
self.config(
background="gold"
)
self.place(relwidth=1, relheight=1)
| FSI-HochschuleTrier/hacker-jeopardy | de/hochschuletrier/jpy/states/GameState.py | Python | mit | 319 |
from csacompendium.csa_practice.models import PracticeLevel
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PracticeLevelListFilter
from csacompendium.csa_practice.api.practicelevel.practicelevelserializers import practice_level_serializers
def practice_level_views():
"""
Practice level views
:return: All practice level views
:rtype: Object
"""
practice_level_serializer = practice_level_serializers()
class PracticeLevelCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated]
class PracticeLevelListAPIView(ListAPIView):
"""
        API list view. Gets all records.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = PracticeLevelListFilter
pagination_class = APILimitOffsetPagination
class PracticeLevelDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'slug'
return {
'PracticeLevelListAPIView': PracticeLevelListAPIView,
'PracticeLevelDetailAPIView': PracticeLevelDetailAPIView,
'PracticeLevelCreateAPIView': PracticeLevelCreateAPIView
}
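# --- Added illustrative sketch (not part of the original csacompendium module). ---
# Hypothetical example of wiring the returned view classes into a urls.py; the
# import style, URL patterns and route names below are assumptions for
# illustration only, not the project's real configuration.
def _example_urlpatterns():
    from django.conf.urls import url  # Django 1.x style import (assumed)
    views = practice_level_views()
    return [
        url(r'^$', views['PracticeLevelListAPIView'].as_view(), name='practice_level_list'),
        url(r'^create/$', views['PracticeLevelCreateAPIView'].as_view(), name='practice_level_create'),
        url(r'^(?P<slug>[\w-]+)/$', views['PracticeLevelDetailAPIView'].as_view(), name='practice_level_detail'),
    ]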
| nkoech/csacompendium | csacompendium/csa_practice/api/practicelevel/practicelevelviews.py | Python | mit | 2,046 |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore.
"""
import re
from operator import mul
from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
class ParsingError(ValueError):
"""Abstract class for any unparseable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = f"'{query}': {explanation}"
super().__init__(message)
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised.
"""
def __init__(self, what, expected, detail=None):
message = f"'{what}' is not {expected}"
if detail:
message = f"{message}: {detail}"
super().__init__(message)
class Query:
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause.
"""
return None, ()
def match(self, item):
"""Check whether this query matches a given Item. Can be used to
perform queries on arbitrary sets of Items.
"""
raise NotImplementedError
def __repr__(self):
return f"{self.__class__.__name__}()"
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 0
class FieldQuery(Query):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
self.fast = fast
def col_clause(self):
return None, ()
def clause(self):
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings.
"""
raise NotImplementedError()
def match(self, item):
return self.value_match(self.pattern, item.get(self.field))
def __repr__(self):
return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, "
"{0.fast})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and self.pattern == other.pattern
def __hash__(self):
return hash((self.field, hash(self.pattern)))
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@classmethod
def value_match(cls, pattern, value):
return pattern == value
class NoneQuery(FieldQuery):
"""A query that checks whether a field is null."""
def __init__(self, field, fast=True):
super().__init__(field, None, fast)
def col_clause(self):
return self.field + " IS NULL", ()
def match(self, item):
return item.get(self.field) is None
def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError()
class StringQuery(StringFieldQuery):
"""A query that matches a whole string in a specific item field."""
def col_clause(self):
search = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() == value.lower()
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() in value.lower()
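# --- Added illustrative example (not part of the original beets module). ---
# Minimal sketch of the string queries above matched against a plain dict; real
# beets code passes Item/Album objects, but FieldQuery.match() only needs a
# .get() lookup, so a dict works as a stand-in here.
def _example_string_queries():
    item = {'artist': 'Beatles', 'title': 'Yesterday'}
    exact = StringQuery('artist', 'beatles')      # case-insensitive whole-string match
    partial = SubstringQuery('title', 'yester')   # case-insensitive substring match
    return exact.match(item), partial.match(item)  # -> (True, True)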
class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item
field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
pattern = self._normalize(pattern)
try:
self.pattern = re.compile(self.pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentValueError(pattern,
"a regular expression",
format(exc))
@staticmethod
def _normalize(s):
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize('NFC', s)
@classmethod
def string_match(cls, pattern, value):
return pattern.search(cls._normalize(value)) is not None
class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
if isinstance(pattern, str):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `bytes` and
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super().__init__(field, pattern)
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
if isinstance(self.pattern, (str, bytes)):
if isinstance(self.pattern, str):
self.pattern = self.pattern.encode('utf-8')
self.buf_pattern = memoryview(self.pattern)
elif isinstance(self.pattern, memoryview):
self.buf_pattern = self.pattern
self.pattern = bytes(self.pattern)
def col_clause(self):
return self.field + " = ?", [self.buf_pattern]
class NumericQuery(FieldQuery):
"""Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century.
Raises InvalidQueryError when the pattern does not represent an int or
a float.
"""
def _convert(self, s):
"""Convert a string to a numeric type (float or int).
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
# This is really just a bit of fun premature optimization.
if not s:
return None
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(s, "an int or a float")
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
parts = pattern.split('..', 1)
if len(parts) == 1:
# No range.
self.point = self._convert(parts[0])
self.rangemin = None
self.rangemax = None
else:
# One- or two-sided range.
self.point = None
self.rangemin = self._convert(parts[0])
self.rangemax = self._convert(parts[1])
def match(self, item):
if self.field not in item:
return False
value = item[self.field]
if isinstance(value, str):
value = self._convert(value)
if self.point is not None:
return value == self.point
else:
if self.rangemin is not None and value < self.rangemin:
return False
if self.rangemax is not None and value > self.rangemax:
return False
return True
def col_clause(self):
if self.point is not None:
return self.field + '=?', (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return ('{0} >= ? AND {0} <= ?'.format(self.field),
(self.rangemin, self.rangemax))
elif self.rangemin is not None:
return f'{self.field} >= ?', (self.rangemin,)
elif self.rangemax is not None:
return f'{self.field} <= ?', (self.rangemax,)
else:
return '1', ()
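# --- Added illustrative example (not part of the original beets module). ---
# Hedged sketch of NumericQuery's one- and two-sided range syntax described in
# the docstring above; the field name and values are arbitrary examples.
def _example_numeric_query():
    item = {'year': 2005}
    since_2001 = NumericQuery('year', '2001..')    # open-ended range
    nineties = NumericQuery('year', '1990..1999')  # closed range
    return since_2001.match(item), nineties.match(item)  # -> (True, False)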
class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
def __init__(self, subqueries=()):
self.subqueries = subqueries
# Act like a sequence.
def __len__(self):
return len(self.subqueries)
def __getitem__(self, key):
return self.subqueries[key]
def __iter__(self):
return iter(self.subqueries)
def __contains__(self, item):
return item in self.subqueries
def clause_with_joiner(self, joiner):
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
clause_parts = []
subvals = []
for subq in self.subqueries:
subq_clause, subq_subvals = subq.clause()
if not subq_clause:
# Fall back to slow query.
return None, ()
clause_parts.append('(' + subq_clause + ')')
subvals += subq_subvals
clause = (' ' + joiner + ' ').join(clause_parts)
return clause, subvals
def __repr__(self):
return "{0.__class__.__name__}({0.subqueries!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subqueries == other.subqueries
def __hash__(self):
"""Since subqueries are mutable, this object should not be hashable.
        However, for convenience purposes, it can be hashed.
"""
return reduce(mul, map(hash, self.subqueries), 1)
class AnyFieldQuery(CollectionQuery):
"""A query that matches if a given FieldQuery subclass matches in
any field. The individual field query class is provided to the
constructor.
"""
def __init__(self, pattern, fields, cls):
self.pattern = pattern
self.fields = fields
self.query_class = cls
subqueries = []
for field in self.fields:
subqueries.append(cls(field, pattern, True))
super().__init__(subqueries)
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
for subq in self.subqueries:
if subq.match(item):
return True
return False
def __repr__(self):
return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, "
"{0.query_class.__name__})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.query_class == other.query_class
def __hash__(self):
return hash((self.pattern, tuple(self.fields), self.query_class))
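# --- Added illustrative example (not part of the original beets module). ---
# Sketch of AnyFieldQuery: one pattern checked against several fields through a
# FieldQuery subclass (SubstringQuery here); the field names are arbitrary.
def _example_any_field_query():
    item = {'artist': 'Radiohead', 'album': 'OK Computer'}
    query = AnyFieldQuery('computer', ['artist', 'album'], SubstringQuery)
    return query.match(item)  # -> True (the album field matches)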
class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
def __setitem__(self, key, value):
self.subqueries[key] = value
def __delitem__(self, key):
del self.subqueries[key]
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('and')
def match(self, item):
return all(q.match(item) for q in self.subqueries)
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
return any(q.match(item) for q in self.subqueries)
class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shorcut for
performing `not(subquery)` without using regular expressions.
"""
def __init__(self, subquery):
self.subquery = subquery
def clause(self):
clause, subvals = self.subquery.clause()
if clause:
return f'not ({clause})', subvals
else:
# If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries.
return clause, subvals
def match(self, item):
return not self.subquery.match(item)
def __repr__(self):
return "{0.__class__.__name__}({0.subquery!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subquery == other.subquery
def __hash__(self):
return hash(('not', hash(self.subquery)))
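# --- Added illustrative example (not part of the original beets module). ---
# Sketch of composing queries with AndQuery and NotQuery; the field names and
# patterns are arbitrary example values.
def _example_composed_query():
    item = {'artist': 'Nirvana', 'year': 1991}
    query = AndQuery([
        SubstringQuery('artist', 'nir'),
        NotQuery(NumericQuery('year', '..1989')),  # i.e. NOT up to 1989
    ])
    return query.match(item)  # -> True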
class TrueQuery(Query):
"""A query that always matches."""
def clause(self):
return '1', ()
def match(self, item):
return True
class FalseQuery(Query):
"""A query that never matches."""
def clause(self):
return '0', ()
def match(self, item):
return False
# Time/date queries.
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
def _parse_periods(pattern):
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
parts = pattern.split('..', 1)
if len(parts) == 1:
instant = Period.parse(parts[0])
return (instant, instant)
else:
start = Period.parse(parts[0])
end = Period.parse(parts[1])
return (start, end)
class Period:
"""A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all
instants of time during January 2014.
"""
precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
date_formats = (
('%Y',), # year
('%Y-%m',), # month
('%Y-%m-%d',), # day
('%Y-%m-%dT%H', '%Y-%m-%d %H'), # hour
('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'), # minute
('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S') # second
)
relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
relative_re = '(?P<sign>[+|-]?)(?P<quantity>[0-9]+)' + \
'(?P<timespan>[y|m|w|d])'
def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
"""
if precision not in Period.precisions:
raise ValueError(f'Invalid precision {precision}')
self.date = date
self.precision = precision
@classmethod
def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
"""
def find_date_and_format(string):
for ord, format in enumerate(cls.date_formats):
for format_option in format:
try:
date = datetime.strptime(string, format_option)
return date, ord
except ValueError:
# Parsing failed.
pass
return (None, None)
if not string:
return None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group('sign')
quantity = match_dq.group('quantity')
timespan = match_dq.group('timespan')
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == '-' else 1
days = cls.relative_units[timespan]
date = datetime.now() + \
timedelta(days=int(quantity) * days) * multiplier
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None:
raise InvalidQueryArgumentValueError(string,
'a valid date/time string')
precision = cls.precisions[ordinal]
return cls(date, precision)
def open_right_endpoint(self):
"""Based on the precision, convert the period to a precise
`datetime` for use as a right endpoint in a right-open interval.
"""
precision = self.precision
date = self.date
if 'year' == self.precision:
return date.replace(year=date.year + 1, month=1)
elif 'month' == precision:
if (date.month < 12):
return date.replace(month=date.month + 1)
else:
return date.replace(year=date.year + 1, month=1)
elif 'day' == precision:
return date + timedelta(days=1)
elif 'hour' == precision:
return date + timedelta(hours=1)
elif 'minute' == precision:
return date + timedelta(minutes=1)
elif 'second' == precision:
return date + timedelta(seconds=1)
else:
raise ValueError(f'unhandled precision {precision}')
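# --- Added illustrative example (not part of the original beets module). ---
# Hedged sketch of Period.parse with one absolute and one relative date string;
# the concrete values are arbitrary examples of the syntax documented above.
def _example_period_parse():
    month = Period.parse('2014-01')   # absolute date, precision 'month'
    last_week = Period.parse('-1w')   # 7 days in the past, precision 'second'
    return month.precision, last_week.precision  # -> ('month', 'second')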
class DateInterval:
"""A closed-open interval of dates.
A left endpoint of None means since the beginning of time.
A right endpoint of None means towards infinity.
"""
def __init__(self, start, end):
if start is not None and end is not None and not start < end:
raise ValueError("start date {} is not before end date {}"
.format(start, end))
self.start = start
self.end = end
@classmethod
def from_periods(cls, start, end):
"""Create an interval with two Periods as the endpoints.
"""
end_date = end.open_right_endpoint() if end is not None else None
start_date = start.date if start is not None else None
return cls(start_date, end_date)
def contains(self, date):
if self.start is not None and date < self.start:
return False
if self.end is not None and date >= self.end:
return False
return True
def __str__(self):
return f'[{self.start}, {self.end})'
class DateQuery(FieldQuery):
"""Matches date fields stored as seconds since Unix epoch time.
Dates can be specified as ``year-month-day`` strings where only year
is mandatory.
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, item):
if self.field not in item:
return False
timestamp = float(item[self.field])
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?"
def col_clause(self):
clause_parts = []
subvals = []
if self.interval.start:
clause_parts.append(self._clause_tmpl.format(self.field, ">="))
subvals.append(_to_epoch_time(self.interval.start))
if self.interval.end:
clause_parts.append(self._clause_tmpl.format(self.field, "<"))
subvals.append(_to_epoch_time(self.interval.end))
if clause_parts:
# One- or two-sided interval.
clause = ' AND '.join(clause_parts)
else:
# Match any date.
clause = '1'
return clause, subvals
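# --- Added illustrative example (not part of the original beets module). ---
# Sketch of DateQuery matching an epoch-seconds field against a year..year
# interval; the field name 'added' and the dates are example values only.
def _example_date_query():
    item = {'added': datetime(2015, 6, 1).timestamp()}
    query = DateQuery('added', '2015..2016')   # Jan 2015 through end of 2016
    return query.match(item)  # -> True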
class DurationQuery(NumericQuery):
"""NumericQuery that allow human-friendly (M:SS) time interval formats.
Converts the range(s) to a float value, and delegates on NumericQuery.
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s):
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
if not s:
return None
try:
return util.raw_seconds_short(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(
s,
"a M:SS string or a float")
# Sorting.
class Sort:
"""An abstract class representing a sort operation for a query into
the item database.
"""
def order_clause(self):
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items):
"""Sort the list of objects and return a list.
"""
return sorted(items)
def is_slow(self):
"""Indicate whether this query is *slow*, meaning that it cannot
be executed in SQL and must be executed in Python.
"""
return False
def __hash__(self):
return 0
def __eq__(self, other):
return type(self) == type(other)
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts.
"""
def __init__(self, sorts=None):
self.sorts = sorts or []
def add_sort(self, sort):
self.sorts.append(sort)
def _sql_sorts(self):
"""Return the list of sub-sorts for which we can be (at least
partially) fast.
A contiguous suffix of fast (SQL-capable) sub-sorts are
executable in SQL. The remaining, even if they are fast
independently, must be executed slowly.
"""
sql_sorts = []
for sort in reversed(self.sorts):
if not sort.order_clause() is None:
sql_sorts.append(sort)
else:
break
sql_sorts.reverse()
return sql_sorts
def order_clause(self):
order_strings = []
for sort in self._sql_sorts():
order = sort.order_clause()
order_strings.append(order)
return ", ".join(order_strings)
def is_slow(self):
for sort in self.sorts:
if sort.is_slow():
return True
return False
def sort(self, items):
slow_sorts = []
switch_slow = False
for sort in reversed(self.sorts):
if switch_slow:
slow_sorts.append(sort)
elif sort.order_clause() is None:
switch_slow = True
slow_sorts.append(sort)
else:
pass
for sort in slow_sorts:
items = sort.sort(items)
return items
def __repr__(self):
return f'MultipleSort({self.sorts!r})'
def __hash__(self):
return hash(tuple(self.sorts))
def __eq__(self, other):
return super().__eq__(other) and \
self.sorts == other.sorts
class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field
self.ascending = ascending
self.case_insensitive = case_insensitive
def sort(self, objs):
# TODO: Conversion and null-detection here. In Python 3,
# comparisons with None fail. We should also support flexible
# attributes with different types without falling over.
def key(item):
field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, str):
field_val = field_val.lower()
return field_val
return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self):
return '<{}: {}{}>'.format(
type(self).__name__,
self.field,
'+' if self.ascending else '-',
)
def __hash__(self):
return hash((self.field, self.ascending))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and \
self.ascending == other.ascending
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
field = '(CASE ' \
'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \
'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \
'ELSE {0} END)'.format(self.field)
else:
field = self.field
return f"{field} {order}"
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self):
return True
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items):
return items
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
def __eq__(self, other):
return type(self) == type(other) or other is None
def __hash__(self):
return 0
| beetbox/beets | beets/dbcore/query.py | Python | mit | 29,107 |
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
flaskport = 8993
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2018-march/serve-signups-with-flask.py | Python | mit | 8,581 |
from flask_webapi import status
from unittest import TestCase
class TestStatus(TestCase):
def test_is_informational(self):
self.assertFalse(status.is_informational(99))
self.assertFalse(status.is_informational(200))
for i in range(100, 199):
self.assertTrue(status.is_informational(i))
def test_is_success(self):
self.assertFalse(status.is_success(199))
self.assertFalse(status.is_success(300))
for i in range(200, 299):
self.assertTrue(status.is_success(i))
def test_is_redirect(self):
self.assertFalse(status.is_redirect(299))
self.assertFalse(status.is_redirect(400))
for i in range(300, 399):
self.assertTrue(status.is_redirect(i))
def test_is_client_error(self):
self.assertFalse(status.is_client_error(399))
self.assertFalse(status.is_client_error(500))
for i in range(400, 499):
self.assertTrue(status.is_client_error(i))
def test_is_server_error(self):
self.assertFalse(status.is_server_error(499))
self.assertFalse(status.is_server_error(600))
for i in range(500, 599):
self.assertTrue(status.is_server_error(i))
| viniciuschiele/flask-webapi | tests/test_status.py | Python | mit | 1,233 |