hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses sequencelengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses sequencelengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses sequencelengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2 classes | is_sharp_comment_removed bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4aad7d61342eb43c7ab5a6792d71b925c81eb8 | 4,918 | py | Python | models/pointnet_cls_rot_transfer.py | OmidPoursaeed/Self_supervised_Learning_Point_Clouds | 4f684cc761347f329eb967823f80522a8a3aedc0 | [
"MIT"
] | 11 | 2020-12-16T16:27:36.000Z | 2021-12-01T04:07:56.000Z | models/pointnet_cls_rot_transfer.py | OmidPoursaeed/Self_supervised_Learning_Point_Clouds | 4f684cc761347f329eb967823f80522a8a3aedc0 | [
"MIT"
] | 2 | 2021-02-09T11:35:01.000Z | 2021-08-06T01:39:42.000Z | models/pointnet_cls_rot_transfer.py | OmidPoursaeed/Self_supervised_Learning_Point_Clouds | 4f684cc761347f329eb967823f80522a8a3aedc0 | [
"MIT"
] | 1 | 2021-08-05T14:07:51.000Z | 2021-08-05T14:07:51.000Z | import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, is_training_base, bn_decay=None, use_input_trans=True, use_feature_trans=True, num_classes=40):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
if use_input_trans:
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
else:
point_cloud_transformed = point_cloud
input_image = tf.expand_dims(point_cloud_transformed, -1)
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
if use_feature_trans:
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
else:
net_transformed = net
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(net_transformed, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
# Symmetric function: max pooling
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
net = tf.reshape(net, [batch_size, -1])
# Retrained layers
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
scope='transfer/fc3', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='transfer/dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='transfer/fc4')
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
# Enforce the transformation as orthogonal matrix
# transform = end_points['transform'] # BxKxK
# K = transform.get_shape()[1].value
# mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
# mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
# mat_diff_loss = tf.nn.l2_loss(mat_diff)
# tf.summary.scalar('mat loss', mat_diff_loss)
return classify_loss # + mat_diff_loss * reg_weight
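# Editor's illustrative sketch (not part of the original file): the
# orthogonality penalty described by the commented-out block above, written
# out as a helper. It assumes end_points['transform'] holds the BxKxK feature
# transform produced by get_model when use_feature_trans=True.
def get_transform_regularizer(end_points, reg_weight=0.001):
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    # penalize deviation of T * T^T from the identity matrix
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    return tf.nn.l2_loss(mat_diff) * reg_weight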
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
| 44.709091 | 135 | 0.614884 | import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, is_training_base, bn_decay=None, use_input_trans=True, use_feature_trans=True, num_classes=40):
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
if use_input_trans:
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
else:
point_cloud_transformed = point_cloud
input_image = tf.expand_dims(point_cloud_transformed, -1)
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
if use_feature_trans:
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
else:
net_transformed = net
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(net_transformed, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
scope='transfer/fc3', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='transfer/dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='transfer/fc4')
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
| true | true |
1c4aae5426137d7eb9f5b72856c25b9d57d53549 | 5,219 | py | Python | seaborn/algorithms.py | jwillis0720/seaborn | 0dc93d01c78370e91ebdf72c888719fbbc6d1085 | [
"BSD-3-Clause"
] | null | null | null | seaborn/algorithms.py | jwillis0720/seaborn | 0dc93d01c78370e91ebdf72c888719fbbc6d1085 | [
"BSD-3-Clause"
] | null | null | null | seaborn/algorithms.py | jwillis0720/seaborn | 0dc93d01c78370e91ebdf72c888719fbbc6d1085 | [
"BSD-3-Clause"
] | null | null | null | """Algorithms to support fitting routines in seaborn plotting functions."""
import numbers
import numpy as np
import warnings
from math import sqrt
def wls_confidence_interval(data, z=1.96):
"""Calculate the Wilson score confidence interval for a data set.
data : array of 1-dimensional data, 1's or 0's
z : float, z-score default=1.96 for a 95% confidence interval
"""
n = len(data)
# counts the number of 1 or Trues over false or 0
p = len([i for i in data if i]) / n
denominator = 1 + z ** 2 / n
centre_adjusted_probability = p + z * z / (2 * n)
adjusted_standard_deviation = sqrt((p * (1 - p) + z * z / (4 * n)) / n)
lower_bound = (centre_adjusted_probability - z * adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z * adjusted_standard_deviation) / denominator
return (lower_bound, upper_bound)
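# Editor's illustrative sketch (not part of seaborn): a quick numeric check of
# the Wilson score interval above on a fabricated sample of 100 boolean
# observations with 37 successes. Wrapped in a helper so importing the module
# is unaffected.
def _example_wilson_interval():
    sample = [1] * 37 + [0] * 63
    lower, upper = wls_confidence_interval(sample, z=1.96)
    # the interval should bracket the raw proportion 37/100
    assert 0.0 <= lower < 0.37 < upper <= 1.0
    return lower, upper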
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
func : string or callable, default np.mean
Function to call on the args that are passed in. If string, tries
to use as named method on numpy array.
seed : Generator | SeedSequence | RandomState | int | None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
random_seed = kwargs.get("random_seed", None)
if random_seed is not None:
msg = "`random_seed` has been renamed to `seed` and will be removed"
warnings.warn(msg)
seed = kwargs.get("seed", random_seed)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rng = _handle_random_seed(seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Allow for a function that is the name of a method on an array
if isinstance(func, str):
def f(x):
return getattr(x, func)()
else:
f = func
# Handle numpy changes
try:
integers = rng.integers
except AttributeError:
integers = rng.randint
# Do the bootstrap
if units is not None:
return _structured_bootstrap(args, n_boot, units, f, func_kwargs, integers)
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
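# Editor's illustrative sketch (not part of seaborn): bootstrap the mean of a
# small fabricated sample with a fixed seed, then read a percentile interval
# off the resampled distribution. Uses only the documented keyword arguments
# (func, n_boot, seed).
def _example_bootstrap_mean():
    data = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    boot = bootstrap(data, func=np.mean, n_boot=1000, seed=0)
    low, high = np.percentile(boot, [2.5, 97.5])
    return low, high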
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
"""Given a seed in one of many formats, return a random number generator.
Generalizes across the numpy 1.17 changes, preferring newer functionality.
"""
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
# General interface for seeding on numpy >= 1.17
rng = np.random.default_rng(seed)
except AttributeError:
# We are on numpy < 1.17, handle options ourselves
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the random number generator"
raise ValueError(err.format(seed))
return rng
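# Editor's illustrative sketch (not part of seaborn): _handle_random_seed
# accepts an int, an existing RandomState/Generator, or None, so callers such
# as bootstrap() above only need whichever integer-sampling method the
# returned object happens to expose.
def _example_handle_seed():
    rng = _handle_random_seed(42)
    # Generator exposes .integers, legacy RandomState exposes .randint
    sample_ints = getattr(rng, "integers", getattr(rng, "randint", None))
    return sample_ints(0, 10, 5)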
| 34.335526 | 95 | 0.634796 | import numbers
import numpy as np
import warnings
from math import sqrt
def wls_confidence_interval(data, z=1.96):
n = len(data)
p = len([i for i in data if i]) / n
denominator = 1 + z ** 2 / n
centre_adjusted_probability = p + z * z / (2 * n)
adjusted_standard_deviation = sqrt((p * (1 - p) + z * z / (4 * n)) / n)
lower_bound = (centre_adjusted_probability - z * adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z * adjusted_standard_deviation) / denominator
return (lower_bound, upper_bound)
def bootstrap(*args, **kwargs):
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
random_seed = kwargs.get("random_seed", None)
if random_seed is not None:
msg = "`random_seed` has been renamed to `seed` and will be removed"
warnings.warn(msg)
seed = kwargs.get("seed", random_seed)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
rng = _handle_random_seed(seed)
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
if isinstance(func, str):
def f(x):
return getattr(x, func)()
else:
f = func
try:
integers = rng.integers
except AttributeError:
integers = rng.randint
if units is not None:
return _structured_bootstrap(args, n_boot, units, f, func_kwargs, integers)
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n, dtype=np.intp)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
rng = np.random.default_rng(seed)
except AttributeError:
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the random number generator"
raise ValueError(err.format(seed))
return rng
| true | true |
1c4aaf705158fbe0e91ecb297f20bdbdacfd197c | 95 | py | Python | siga/prospeccao/apps.py | JenniferAmaral/DjangoSpike | 768237bb0f3cffe7bbdbcab38a8bae6faa78e495 | [
"Apache-2.0"
] | null | null | null | siga/prospeccao/apps.py | JenniferAmaral/DjangoSpike | 768237bb0f3cffe7bbdbcab38a8bae6faa78e495 | [
"Apache-2.0"
] | 2 | 2021-03-19T03:21:17.000Z | 2021-03-30T13:24:07.000Z | siga/prospeccao/apps.py | JenniferAmaral/DjangoSpike | 768237bb0f3cffe7bbdbcab38a8bae6faa78e495 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class ProspeccaoConfig(AppConfig):
name = 'prospeccao'
| 15.833333 | 34 | 0.768421 | from django.apps import AppConfig
class ProspeccaoConfig(AppConfig):
name = 'prospeccao'
| true | true |
1c4aaf84e496d0feb65c92950f3a798d787c4fe0 | 4,776 | py | Python | app/storage.py | JB-Tellez/flask-hello-world | 7fce8dea357a847c90bce095b2bfc43036903e4d | [
"MIT"
] | null | null | null | app/storage.py | JB-Tellez/flask-hello-world | 7fce8dea357a847c90bce095b2bfc43036903e4d | [
"MIT"
] | null | null | null | app/storage.py | JB-Tellez/flask-hello-world | 7fce8dea357a847c90bce095b2bfc43036903e4d | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
from flask_cors import CORS
from os import environ
import requests
from datetime import datetime
def get_location():
query = request.args.get('data')
api_key = environ.get('GEOCODE_API_KEY')
URL = f'https://maps.googleapis.com/maps/api/geocode/json?address={query}&key={api_key}'
print('URL', URL)
locations = requests.get(URL).json()
print('locations', locations)
location = Location(query, locations['results'][0])
return jsonify(location.serialize())
class Location:
def __init__(self, query, info):
self.search_query = query
self.formatted_query = info['formatted_address']
self.latitude = info['geometry']['location']['lat']
self.longitude = info['geometry']['location']['lng']
def serialize(self):
return vars(self)
def get_weather():
api_key = environ.get('WEATHER_API_KEY')
latitude = request.args['data[latitude]']
longitude = request.args['data[longitude]']
url = f'https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}'
forecasts = requests.get(url).json()
dailies = [Forecast(daily).serialize()
for daily in forecasts['daily']['data']]
return jsonify(dailies)
class Forecast:
def __init__(self, info):
self.forecast = info['summary']
epoch_seconds = int(info['time'])
self.time = datetime.utcfromtimestamp(
epoch_seconds).strftime("%A %B %d, %Y")
def serialize(self):
return vars(self)
def get_events():
api_key = environ.get('EVENTBRITE_API_KEY')
address = request.args['data[formatted_query]']
url = f'https://www.eventbriteapi.com/v3/events/search?token={api_key}&location.address={address}'
event_data = requests.get(url).json()
events = [Event(eventInfo).serialize()
for eventInfo in event_data['events']]
return jsonify(events)
class Event:
def __init__(self, info):
self.link = info['url']
self.name = info['name']['text']
self.event_date = datetime.fromisoformat(
info['start']['local']).strftime("%A %B %d, %Y")
self.summary = info['summary']
def serialize(self):
return vars(self)
def get_yelps():
auth_token = environ.get('YELP_API_KEY')
location = request.args['data[search_query]']
url = f'https://api.yelp.com/v3/businesses/search?location={location}'
hed = {'Authorization': 'Bearer ' + auth_token}
api_data = requests.get(url, headers=hed).json()
yelps = [Yelp(business).serialize() for business in api_data['businesses']]
return jsonify(yelps)
class Yelp:
"""
"""
def __init__(self, info):
self.name = info['name']
self.image_url = info['image_url']
self.rating = info['rating']
self.url = info['url']
def serialize(self):
return vars(self)
def get_movies():
api_key = environ.get('MOVIE_API_KEY')
query = request.args['data[search_query]']
url = f'https://api.themoviedb.org/3/search/movie/?api_key={api_key}&language=en-US&page=1&query={query}'
api_data = requests.get(url).json()
movies = [Movie(info).serialize() for info in api_data['results']]
return jsonify(movies)
class Movie:
"""
"""
def __init__(self, info):
self.title = info['title']
self.overview = info['overview']
self.average_votes = info['vote_average']
self.total_votes = info['vote_count']
self.popularity = info['popularity']
self.released_on = info['release_date']
self.image_url = 'https://image.tmdb.org/t/p/w500' + \
(info['poster_path'] or '')
def serialize(self):
return vars(self)
def get_trails():
api_key = environ.get('TRAIL_API_KEY')
latitude = request.args['data[latitude]']
longitude = request.args['data[longitude]']
url = f'https://www.hikingproject.com/data/get-trails?lat={latitude}&lon={longitude}&maxDistance=200&key={api_key}'
trail_data = requests.get(url).json()
trails = [Trail(trail_info).serialize()
for trail_info in trail_data['trails']]
return jsonify(trails)
class Trail:
"""
"""
def __init__(self, info):
self.name = info['name']
self.location = info['location']
self.length = info['length']
self.stars = info['stars']
self.star_votes = info['starVotes']
self.summary = info['summary']
self.trail_url = info['url']
self.conditions = info['conditionDetails']
self.condition_date = info['conditionDate'][0:10]
self.condition_time = info['conditionDate'][12:]
def serialize(self):
return vars(self)
| 24.618557 | 119 | 0.629606 | from flask import Flask, jsonify, request
from flask_cors import CORS
from os import environ
import requests
from datetime import datetime
def get_location():
query = request.args.get('data')
api_key = environ.get('GEOCODE_API_KEY')
URL = f'https://maps.googleapis.com/maps/api/geocode/json?address={query}&key={api_key}'
print('URL', URL)
locations = requests.get(URL).json()
print('locations', locations)
location = Location(query, locations['results'][0])
return jsonify(location.serialize())
class Location:
def __init__(self, query, info):
self.search_query = query
self.formatted_query = info['formatted_address']
self.latitude = info['geometry']['location']['lat']
self.longitude = info['geometry']['location']['lng']
def serialize(self):
return vars(self)
def get_weather():
api_key = environ.get('WEATHER_API_KEY')
latitude = request.args['data[latitude]']
longitude = request.args['data[longitude]']
url = f'https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}'
forecasts = requests.get(url).json()
dailies = [Forecast(daily).serialize()
for daily in forecasts['daily']['data']]
return jsonify(dailies)
class Forecast:
def __init__(self, info):
self.forecast = info['summary']
epoch_seconds = int(info['time'])
self.time = datetime.utcfromtimestamp(
epoch_seconds).strftime("%A %B %d, %Y")
def serialize(self):
return vars(self)
def get_events():
api_key = environ.get('EVENTBRITE_API_KEY')
address = request.args['data[formatted_query]']
url = f'https://www.eventbriteapi.com/v3/events/search?token={api_key}&location.address={address}'
event_data = requests.get(url).json()
events = [Event(eventInfo).serialize()
for eventInfo in event_data['events']]
return jsonify(events)
class Event:
def __init__(self, info):
self.link = info['url']
self.name = info['name']['text']
self.event_date = datetime.fromisoformat(
info['start']['local']).strftime("%A %B %d, %Y")
self.summary = info['summary']
def serialize(self):
return vars(self)
def get_yelps():
auth_token = environ.get('YELP_API_KEY')
location = request.args['data[search_query]']
url = f'https://api.yelp.com/v3/businesses/search?location={location}'
hed = {'Authorization': 'Bearer ' + auth_token}
api_data = requests.get(url, headers=hed).json()
yelps = [Yelp(business).serialize() for business in api_data['businesses']]
return jsonify(yelps)
class Yelp:
def __init__(self, info):
self.name = info['name']
self.image_url = info['image_url']
self.rating = info['rating']
self.url = info['url']
def serialize(self):
return vars(self)
def get_movies():
api_key = environ.get('MOVIE_API_KEY')
query = request.args['data[search_query]']
url = f'https://api.themoviedb.org/3/search/movie/?api_key={api_key}&language=en-US&page=1&query={query}'
api_data = requests.get(url).json()
movies = [Movie(info).serialize() for info in api_data['results']]
return jsonify(movies)
class Movie:
def __init__(self, info):
self.title = info['title']
self.overview = info['overview']
self.average_votes = info['vote_average']
self.total_votes = info['vote_count']
self.popularity = info['popularity']
self.released_on = info['release_date']
self.image_url = 'https://image.tmdb.org/t/p/w500' + \
(info['poster_path'] or '')
def serialize(self):
return vars(self)
def get_trails():
api_key = environ.get('TRAIL_API_KEY')
latitude = request.args['data[latitude]']
longitude = request.args['data[longitude]']
url = f'https://www.hikingproject.com/data/get-trails?lat={latitude}&lon={longitude}&maxDistance=200&key={api_key}'
trail_data = requests.get(url).json()
trails = [Trail(trail_info).serialize()
for trail_info in trail_data['trails']]
return jsonify(trails)
class Trail:
def __init__(self, info):
self.name = info['name']
self.location = info['location']
self.length = info['length']
self.stars = info['stars']
self.star_votes = info['starVotes']
self.summary = info['summary']
self.trail_url = info['url']
self.conditions = info['conditionDetails']
self.condition_date = info['conditionDate'][0:10]
self.condition_time = info['conditionDate'][12:]
def serialize(self):
return vars(self)
| true | true |
1c4ab0a1b89ee3ce8f7c20af7b3a0cf0e50ea511 | 1,530 | py | Python | server/models/portfolio/risk.py | lluo5779/Robo-Adviser | 43aa4b73bfc96e55ed664328330a930975596124 | [
"MIT"
] | null | null | null | server/models/portfolio/risk.py | lluo5779/Robo-Adviser | 43aa4b73bfc96e55ed664328330a930975596124 | [
"MIT"
] | 3 | 2021-03-31T19:24:03.000Z | 2021-12-13T20:26:39.000Z | server/models/portfolio/risk.py | lluo5779/Robo-Adviser | 43aa4b73bfc96e55ed664328330a930975596124 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
def risk_prefs(horizon, aversion, cardinal, return_target, l, mu_bl1, mu_bl2, cov_bl1):
if horizon is None:
horizon = 10
alpha = 0.05
safe_target = float(((mu_bl1 + mu_bl2) / 2).mean())
# set the variances for the first period estimates
vars = pd.DataFrame(np.diag(cov_bl1), index=cov_bl1.index)
risk_mul, turn_mul = l, 1
if horizon <= 1:
# select the 12 assets with the lowest variances
risk_mul *= 2
turn_mul *= 0.25
alpha = 0.20
elif horizon <= 5:
risk_mul *= 0.75
turn_mul *= 1
alpha = 0.10
else:
risk_mul *= 0.25
turn_mul *= 2
print("RISK PREFERENCES\n\n\n")
if return_target > safe_target:
risk_mul *= 0.5
if aversion == 1:
cardinality = list(np.where(mu_bl1.rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
exposures = (0.02, 0.30)
elif aversion == 2:
cardinality = list(np.where(pd.DataFrame(np.divide(mu_bl1.values, vars.values).ravel()).rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
exposures = (0.04, 0.20)
else:
# NO SINGLE NAME STOCKS
vars = pd.DataFrame(np.diag(cov_bl1.iloc[:-10, :-10]), index=mu_bl1[:-10].index)
cardinality = list(np.where(vars.rank(ascending=True) > (len(mu_bl1[:-10])- cardinal), 1, 0).ravel()) + [0]*10
exposures = (0.05, 0.15)
risk_mul *= aversion
return (alpha, alpha*1.02), (risk_mul, turn_mul), exposures, cardinality
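# Editor's illustrative sketch (not part of the original file): shape of a
# call and of the returned tuple -- two alpha levels, (risk, turnover)
# multipliers, exposure bounds, and a 0/1 cardinality mask aligned with
# mu_bl1's index. mu_bl1, mu_bl2 and cov_bl1 are assumed to be the expected
# return and covariance estimates the caller normally supplies (the _bl
# suffix suggests Black-Litterman); the numeric arguments are invented.
def _example_risk_prefs(mu_bl1, mu_bl2, cov_bl1):
    alphas, multipliers, exposures, mask = risk_prefs(
        horizon=5, aversion=2, cardinal=12, return_target=0.06,
        l=1.0, mu_bl1=mu_bl1, mu_bl2=mu_bl2, cov_bl1=cov_bl1)
    return alphas, multipliers, exposures, mask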
| 28.333333 | 143 | 0.594118 | import numpy as np
import pandas as pd
def risk_prefs(horizon, aversion, cardinal, return_target, l, mu_bl1, mu_bl2, cov_bl1):
if horizon is None:
horizon = 10
alpha = 0.05
safe_target = float(((mu_bl1 + mu_bl2) / 2).mean())
vars = pd.DataFrame(np.diag(cov_bl1), index=cov_bl1.index)
risk_mul, turn_mul = l, 1
if horizon <= 1:
risk_mul *= 2
turn_mul *= 0.25
alpha = 0.20
elif horizon <= 5:
risk_mul *= 0.75
turn_mul *= 1
alpha = 0.10
else:
risk_mul *= 0.25
turn_mul *= 2
print("RISK PREFERENCES\n\n\n")
if return_target > safe_target:
risk_mul *= 0.5
if aversion == 1:
cardinality = list(np.where(mu_bl1.rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
exposures = (0.02, 0.30)
elif aversion == 2:
cardinality = list(np.where(pd.DataFrame(np.divide(mu_bl1.values, vars.values).ravel()).rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
exposures = (0.04, 0.20)
else:
vars = pd.DataFrame(np.diag(cov_bl1.iloc[:-10, :-10]), index=mu_bl1[:-10].index)
cardinality = list(np.where(vars.rank(ascending=True) > (len(mu_bl1[:-10])- cardinal), 1, 0).ravel()) + [0]*10
exposures = (0.05, 0.15)
risk_mul *= aversion
return (alpha, alpha*1.02), (risk_mul, turn_mul), exposures, cardinality
| true | true |
1c4ab187d643a0593fe5cdf9d597191769ebd87f | 2,243 | py | Python | scripts/recalc_afacts.py | EvictionLab/eviction-lab-etl | d94a7e52de8890c9371518b5020d1a6aa3a5fc2e | [
"MIT"
] | 9 | 2018-04-07T17:52:49.000Z | 2020-07-06T01:52:21.000Z | scripts/recalc_afacts.py | EvictionLab/eviction-lab-etl | d94a7e52de8890c9371518b5020d1a6aa3a5fc2e | [
"MIT"
] | 56 | 2017-09-11T21:19:13.000Z | 2020-01-06T18:57:23.000Z | scripts/recalc_afacts.py | EvictionLab/eviction-lab-etl | d94a7e52de8890c9371518b5020d1a6aa3a5fc2e | [
"MIT"
] | 1 | 2019-11-04T18:56:45.000Z | 2019-11-04T18:56:45.000Z | """
Recalculates allocation factors for a given geography level and geographic
correspondence file.
Arguments
----------
argv[1] : str
The geography level to create weights for (block-groups or tracts)
argv[2] : str
The file path to the geography correspondence file
generated from http://mcdc.missouri.edu/applications/geocorr2000.html
Outputs
-------
str
a string of CSV data containing the weights
Output has header: GEOID00,pop2k,afact
"""
import sys
import pandas as pd
if __name__ == '__main__':
# load provided csv files into dataframes
geocorr_df = pd.read_csv(
sys.argv[2],
dtype={
'county': 'object',
'tract': 'object',
'bg': 'object',
'block': 'object',
'pop2k': 'float64'
})
# combine geography levels in the 2000 geo correspondence file to create
# block level GEOIDs for all entries
geocorr_df['GEOID00'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', '') + geocorr_df['block'])
# Create GEOID for the provided geography level (tracts or block groups)
if sys.argv[1] == 'tracts':
geocorr_df['GEOID'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', ''))
# Slice the last 4 characters off of block GEOID to get tract GEOID
geoid_slice = -4
elif sys.argv[1] == 'block-groups':
geocorr_df['GEOID'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', '') + geocorr_df['bg'])
# Slice the last 3 characters off of block GEOID to get block group GEOID
geoid_slice = -3
else:
raise ValueError('Invalid geography string supplied')
# recalculate allocation factors
pop2k_totals = pd.DataFrame(geocorr_df.groupby('GEOID')['pop2k'].sum()).reset_index()
pop2k_totals.rename(columns={'pop2k': 'total_pop_00'}, inplace=True)
geocorr_df = geocorr_df.merge(pop2k_totals, on='GEOID', how='left')
del pop2k_totals
geocorr_df['afact'] = geocorr_df['pop2k'] / geocorr_df['total_pop_00']
output_df = geocorr_df[['GEOID00', 'pop2k', 'afact']].copy()
output_df.to_csv(sys.stdout, index=False)
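# Editor's illustrative sketch (not part of the original file): invocation
# implied by the module docstring -- geography level first, geocorr CSV
# second, weights captured from stdout. File names below are invented.
#   python recalc_afacts.py tracts geocorr2000_blocks.csv > tract_weights.csv
#   python recalc_afacts.py block-groups geocorr2000_blocks.csv > bg_weights.csv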
| 32.042857 | 89 | 0.628622 |
import sys
import pandas as pd
if __name__ == '__main__':
geocorr_df = pd.read_csv(
sys.argv[2],
dtype={
'county': 'object',
'tract': 'object',
'bg': 'object',
'block': 'object',
'pop2k': 'float64'
})
geocorr_df['GEOID00'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', '') + geocorr_df['block'])
if sys.argv[1] == 'tracts':
geocorr_df['GEOID'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', ''))
geoid_slice = -4
elif sys.argv[1] == 'block-groups':
geocorr_df['GEOID'] = (
geocorr_df['county'] + geocorr_df['tract'].str.replace(
'.', '') + geocorr_df['bg'])
geoid_slice = -3
else:
raise ValueError('Invalid geography string supplied')
pop2k_totals = pd.DataFrame(geocorr_df.groupby('GEOID')['pop2k'].sum()).reset_index()
pop2k_totals.rename(columns={'pop2k': 'total_pop_00'}, inplace=True)
geocorr_df = geocorr_df.merge(pop2k_totals, on='GEOID', how='left')
del pop2k_totals
geocorr_df['afact'] = geocorr_df['pop2k'] / geocorr_df['total_pop_00']
output_df = geocorr_df[['GEOID00', 'pop2k', 'afact']].copy()
output_df.to_csv(sys.stdout, index=False)
| true | true |
1c4ab1e64362cf00e8647c93dc60b6ca75d9cbb0 | 4,371 | py | Python | ui/prefs.py | bfrobin446/openfrontier | bde74dc82be858cd0b0bc64ddfe76020d1179a9c | [
"MIT"
] | null | null | null | ui/prefs.py | bfrobin446/openfrontier | bde74dc82be858cd0b0bc64ddfe76020d1179a9c | [
"MIT"
] | null | null | null | ui/prefs.py | bfrobin446/openfrontier | bde74dc82be858cd0b0bc64ddfe76020d1179a9c | [
"MIT"
] | null | null | null | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import itertools
from . import colors
from . import keys
from .colorbutton import ColorButton
class KeyPicker(QLineEdit):
keyChanged = pyqtSignal(Qt.Key)
def __init__(self, key=None, parent=None, flags=Qt.Widget, **kwargs):
QLineEdit.__init__(self, parent, **kwargs)
self.setWindowFlags(flags)
self.setReadOnly(True)
self.key = key
self.setText(self.textForKey(self.key))
@staticmethod
def textForKey(k):
for name, value in Qt.__dict__.items():
if name[0:4] == 'Key_':
if k == value:
return name
return ''
def focusInEvent(self, evt):
self.setText("<press a key>")
def keyPressEvent(self, evt):
self.key = evt.key()
self.setText(self.textForKey(self.key))
self.keyChanged.emit(self.key)
def setKey(self, key):
self.key = key
self.setText(self.textForKey(self.key))
self.keyChanged.emit(self.key)
class PrefsDialog(QDialog):
def __init__(self, parent=None, flags=Qt.Widget):
QDialog.__init__(self, parent, flags)
self.settings = QSettings()
self.setLayout(QVBoxLayout(self))
self.tabs = QTabWidget(self)
self.layout().addWidget(self.tabs)
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.RestoreDefaults,
accepted = self.close
)
self.layout().addWidget(self.buttons)
self.buttons.button(
self.buttons.RestoreDefaults).clicked.connect(self.defaults)
self.addTab(ColorPrefPane(), "Colors")
self.addTab(KeyPrefPane(), "Keys")
def addTab(self, widget, title):
scroller = QScrollArea()
scroller.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroller.setWidget(widget)
scroller.setMinimumWidth(
widget.sizeHint().width()
+ qApp.style().pixelMetric(
QStyle.PM_ScrollBarExtent, None, scroller)
* 2)
widget.resize(widget.sizeHint())
self.tabs.addTab(scroller, title)
def defaults(self):
self.tabs.currentWidget().widget().defaults()
class ColorPrefPane(QWidget):
def __init__(self, parent=None, flags=Qt.Widget):
QWidget.__init__(self, parent, flags)
self.setLayout(QVBoxLayout(self))
for cat, catname in zip(colors.categories, colors.catnames):
self.layout().addWidget(
QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
catLayout = QGridLayout()
self.layout().addLayout(catLayout)
for i, c in enumerate(getattr(colors, cat).values()):
catLayout.addWidget(QLabel(c.displayname, self), i, 1)
picker = ColorButton(c.current, self, colorChanged = c.update)
catLayout.addWidget(picker, i, 0)
def defaults(self):
for color, picker in zip(
itertools.chain.from_iterable(
getattr(colors, cat).values() for cat in colors.categories),
(obj for obj in self.children() if isinstance(obj, ColorButton))
):
picker.setColor(color.default)
class KeyPrefPane(QWidget):
def __init__(self, parent=None, flags=Qt.Widget):
QWidget.__init__(self, parent, flags)
self.setLayout(QVBoxLayout(self))
for cat, catname in zip(keys.categories, keys.catnames):
self.layout().addWidget(
QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
catLayout = QGridLayout()
self.layout().addLayout(catLayout)
for i, c in enumerate(getattr(keys, cat).values()):
catLayout.addWidget(QLabel(c.displayname, self), i, 1)
picker = KeyPicker(c.current, self, keyChanged = c.update)
catLayout.addWidget(picker, i, 0)
def defaults(self):
for key, picker in zip(
itertools.chain.from_iterable(
getattr(keys, cat).values() for cat in keys.categories),
(obj for obj in self.children() if isinstance(obj, KeyPicker))
):
picker.setKey(key.default)
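# Editor's illustrative sketch (not part of the original file): one minimal
# way to show the preferences dialog, assuming the colors/keys registries
# imported above are already populated and a QApplication can be created in
# the calling process.
def _example_show_prefs(argv):
    app = QApplication(argv)
    dialog = PrefsDialog()
    dialog.exec_()
    return dialog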
| 35.25 | 80 | 0.590254 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import itertools
from . import colors
from . import keys
from .colorbutton import ColorButton
class KeyPicker(QLineEdit):
keyChanged = pyqtSignal(Qt.Key)
def __init__(self, key=None, parent=None, flags=Qt.Widget, **kwargs):
QLineEdit.__init__(self, parent, **kwargs)
self.setWindowFlags(flags)
self.setReadOnly(True)
self.key = key
self.setText(self.textForKey(self.key))
@staticmethod
def textForKey(k):
for name, value in Qt.__dict__.items():
if name[0:4] == 'Key_':
if k == value:
return name
return ''
def focusInEvent(self, evt):
self.setText("<press a key>")
def keyPressEvent(self, evt):
self.key = evt.key()
self.setText(self.textForKey(self.key))
self.keyChanged.emit(self.key)
def setKey(self, key):
self.key = key
self.setText(self.textForKey(self.key))
self.keyChanged.emit(self.key)
class PrefsDialog(QDialog):
def __init__(self, parent=None, flags=Qt.Widget):
QDialog.__init__(self, parent, flags)
self.settings = QSettings()
self.setLayout(QVBoxLayout(self))
self.tabs = QTabWidget(self)
self.layout().addWidget(self.tabs)
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.RestoreDefaults,
accepted = self.close
)
self.layout().addWidget(self.buttons)
self.buttons.button(
self.buttons.RestoreDefaults).clicked.connect(self.defaults)
self.addTab(ColorPrefPane(), "Colors")
self.addTab(KeyPrefPane(), "Keys")
def addTab(self, widget, title):
scroller = QScrollArea()
scroller.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroller.setWidget(widget)
scroller.setMinimumWidth(
widget.sizeHint().width()
+ qApp.style().pixelMetric(
QStyle.PM_ScrollBarExtent, None, scroller)
* 2)
widget.resize(widget.sizeHint())
self.tabs.addTab(scroller, title)
def defaults(self):
self.tabs.currentWidget().widget().defaults()
class ColorPrefPane(QWidget):
def __init__(self, parent=None, flags=Qt.Widget):
QWidget.__init__(self, parent, flags)
self.setLayout(QVBoxLayout(self))
for cat, catname in zip(colors.categories, colors.catnames):
self.layout().addWidget(
QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
catLayout = QGridLayout()
self.layout().addLayout(catLayout)
for i, c in enumerate(getattr(colors, cat).values()):
catLayout.addWidget(QLabel(c.displayname, self), i, 1)
picker = ColorButton(c.current, self, colorChanged = c.update)
catLayout.addWidget(picker, i, 0)
def defaults(self):
for color, picker in zip(
itertools.chain.from_iterable(
getattr(colors, cat).values() for cat in colors.categories),
(obj for obj in self.children() if isinstance(obj, ColorButton))
):
picker.setColor(color.default)
class KeyPrefPane(QWidget):
def __init__(self, parent=None, flags=Qt.Widget):
QWidget.__init__(self, parent, flags)
self.setLayout(QVBoxLayout(self))
for cat, catname in zip(keys.categories, keys.catnames):
self.layout().addWidget(
QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
catLayout = QGridLayout()
self.layout().addLayout(catLayout)
for i, c in enumerate(getattr(keys, cat).values()):
catLayout.addWidget(QLabel(c.displayname, self), i, 1)
picker = KeyPicker(c.current, self, keyChanged = c.update)
catLayout.addWidget(picker, i, 0)
def defaults(self):
for key, picker in zip(
itertools.chain.from_iterable(
getattr(keys, cat).values() for cat in keys.categories),
(obj for obj in self.children() if isinstance(obj, KeyPicker))
):
picker.setKey(key.default)
| true | true |
1c4ab1eafd7a0741e2d75a3e980b2a4775179a92 | 417 | py | Python | backend/tester1000_dev_23525/wsgi.py | crowdbotics-dev/tester1000-dev-23525 | 46c650bdac998a4df3ee19917a09571ec58c0c68 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/tester1000_dev_23525/wsgi.py | crowdbotics-dev/tester1000-dev-23525 | 46c650bdac998a4df3ee19917a09571ec58c0c68 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/tester1000_dev_23525/wsgi.py | crowdbotics-dev/tester1000-dev-23525 | 46c650bdac998a4df3ee19917a09571ec58c0c68 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for tester1000_dev_23525 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tester1000_dev_23525.settings')
application = get_wsgi_application()
| 24.529412 | 80 | 0.798561 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tester1000_dev_23525.settings')
application = get_wsgi_application()
| true | true |
1c4ab3589fb7396833431a0bdd7613a3be9d614a | 5,648 | py | Python | openstack/tests/unit/cloud/test_qos_rule_type.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/cloud/test_qos_rule_type.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/cloud/test_qos_rule_type.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack.cloud import exc
from openstack.tests.unit import base
class TestQosRuleType(base.TestCase):
rule_type_name = "bandwidth_limit"
qos_extension = {
"updated": "2015-06-08T10:00:00-00:00",
"name": "Quality of Service",
"links": [],
"alias": "qos",
"description": "The Quality of Service extension."
}
qos_rule_type_details_extension = {
"updated": "2017-06-22T10:00:00-00:00",
"name": "Details of QoS rule types",
"links": [],
"alias": "qos-rule-type-details",
"description": ("Expose details about QoS rule types supported by "
"loaded backend drivers")
}
mock_rule_type_bandwidth_limit = {
'type': 'bandwidth_limit'
}
mock_rule_type_dscp_marking = {
'type': 'dscp_marking'
}
mock_rule_types = [
mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking]
mock_rule_type_details = {
'drivers': [{
'name': 'linuxbridge',
'supported_parameters': [{
'parameter_values': {'start': 0, 'end': 2147483647},
'parameter_type': 'range',
'parameter_name': u'max_kbps'
}, {
'parameter_values': ['ingress', 'egress'],
'parameter_type': 'choices',
'parameter_name': u'direction'
}, {
'parameter_values': {'start': 0, 'end': 2147483647},
'parameter_type': 'range',
'parameter_name': 'max_burst_kbps'
}]
}],
'type': rule_type_name
}
def test_list_qos_rule_types(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'qos', 'rule-types.json']),
json={'rule_types': self.mock_rule_types})
])
rule_types = self.cloud.list_qos_rule_types()
self.assertEqual(self.mock_rule_types, rule_types)
self.assert_calls()
def test_list_qos_rule_types_no_qos_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': []})
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_qos_rule_types)
self.assert_calls()
def test_get_qos_rule_type_details(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [
self.qos_extension,
self.qos_rule_type_details_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [
self.qos_extension,
self.qos_rule_type_details_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'qos', 'rule-types',
'%s.json' % self.rule_type_name]),
json={'rule_type': self.mock_rule_type_details})
])
self.assertEqual(
self.mock_rule_type_details,
self.cloud.get_qos_rule_type_details(self.rule_type_name)
)
self.assert_calls()
def test_get_qos_rule_type_details_no_qos_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': []})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.get_qos_rule_type_details, self.rule_type_name)
self.assert_calls()
def test_get_qos_rule_type_details_no_qos_details_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.get_qos_rule_type_details, self.rule_type_name)
self.assert_calls()
| 37.653333 | 78 | 0.554887 |
from openstack.cloud import exc
from openstack.tests.unit import base
class TestQosRuleType(base.TestCase):
rule_type_name = "bandwidth_limit"
qos_extension = {
"updated": "2015-06-08T10:00:00-00:00",
"name": "Quality of Service",
"links": [],
"alias": "qos",
"description": "The Quality of Service extension."
}
qos_rule_type_details_extension = {
"updated": "2017-06-22T10:00:00-00:00",
"name": "Details of QoS rule types",
"links": [],
"alias": "qos-rule-type-details",
"description": ("Expose details about QoS rule types supported by "
"loaded backend drivers")
}
mock_rule_type_bandwidth_limit = {
'type': 'bandwidth_limit'
}
mock_rule_type_dscp_marking = {
'type': 'dscp_marking'
}
mock_rule_types = [
mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking]
mock_rule_type_details = {
'drivers': [{
'name': 'linuxbridge',
'supported_parameters': [{
'parameter_values': {'start': 0, 'end': 2147483647},
'parameter_type': 'range',
'parameter_name': u'max_kbps'
}, {
'parameter_values': ['ingress', 'egress'],
'parameter_type': 'choices',
'parameter_name': u'direction'
}, {
'parameter_values': {'start': 0, 'end': 2147483647},
'parameter_type': 'range',
'parameter_name': 'max_burst_kbps'
}]
}],
'type': rule_type_name
}
def test_list_qos_rule_types(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'qos', 'rule-types.json']),
json={'rule_types': self.mock_rule_types})
])
rule_types = self.cloud.list_qos_rule_types()
self.assertEqual(self.mock_rule_types, rule_types)
self.assert_calls()
def test_list_qos_rule_types_no_qos_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': []})
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_qos_rule_types)
self.assert_calls()
def test_get_qos_rule_type_details(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [
self.qos_extension,
self.qos_rule_type_details_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [
self.qos_extension,
self.qos_rule_type_details_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'qos', 'rule-types',
'%s.json' % self.rule_type_name]),
json={'rule_type': self.mock_rule_type_details})
])
self.assertEqual(
self.mock_rule_type_details,
self.cloud.get_qos_rule_type_details(self.rule_type_name)
)
self.assert_calls()
def test_get_qos_rule_type_details_no_qos_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': []})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.get_qos_rule_type_details, self.rule_type_name)
self.assert_calls()
def test_get_qos_rule_type_details_no_qos_details_extension(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions.json']),
json={'extensions': [self.qos_extension]})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.get_qos_rule_type_details, self.rule_type_name)
self.assert_calls()
| true | true |
1c4ab3b577beb0365924ba95af395d8155ef537a | 1,359 | py | Python | src/niweb/apps/userprofile/views.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2018-12-21T09:35:27.000Z | 2019-07-31T18:51:58.000Z | src/niweb/apps/userprofile/views.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2019-07-25T07:10:23.000Z | 2021-02-08T09:58:57.000Z | src/niweb/apps/userprofile/views.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | 5 | 2019-02-06T12:00:26.000Z | 2021-11-19T14:48:06.000Z | from apps.userprofile.models import UserProfile
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from actstream.models import actor_stream
@login_required
def list_userprofiles(request):
profile_list = UserProfile.objects.all()
return render(request, 'userprofile/list_userprofiles.html',
{'profile_list': profile_list})
@login_required
def userprofile_detail(request, userprofile_id):
profile = get_object_or_404(UserProfile, pk=userprofile_id)
activities = actor_stream(profile.user)
paginator = Paginator(activities, 50, allow_empty_first_page=True) # Show 50 activities per page
page = request.GET.get('page')
try:
activities = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
activities = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
activities = paginator.page(paginator.num_pages)
total_activities = '{:,}'.format(activities.paginator.count)
return render(request, 'userprofile/userprofile_detail.html',
{'profile': profile, 'activities': activities, 'total_activities': total_activities})
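# Editor's illustrative sketch (not part of the original file): hypothetical
# urls.py wiring for the two views above; route patterns and names are
# invented, not taken from the project.
def _example_urlpatterns():
    from django.conf.urls import url
    return [
        url(r'^userprofiles/$', list_userprofiles, name='list_userprofiles'),
        url(r'^userprofiles/(?P<userprofile_id>\d+)/$', userprofile_detail,
            name='userprofile_detail'),
    ]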
| 42.46875 | 101 | 0.743194 | from apps.userprofile.models import UserProfile
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from actstream.models import actor_stream
@login_required
def list_userprofiles(request):
profile_list = UserProfile.objects.all()
return render(request, 'userprofile/list_userprofiles.html',
{'profile_list': profile_list})
@login_required
def userprofile_detail(request, userprofile_id):
profile = get_object_or_404(UserProfile, pk=userprofile_id)
activities = actor_stream(profile.user)
paginator = Paginator(activities, 50, allow_empty_first_page=True)
page = request.GET.get('page')
try:
activities = paginator.page(page)
except PageNotAnInteger:
activities = paginator.page(1)
except EmptyPage:
activities = paginator.page(paginator.num_pages)
total_activities = '{:,}'.format(activities.paginator.count)
return render(request, 'userprofile/userprofile_detail.html',
{'profile': profile, 'activities': activities, 'total_activities': total_activities})
| true | true |
1c4ab45c50dc2c8013cb457044e00259c45ba137 | 1,102 | py | Python | app/__init__.py | Shindler7/libpraks | c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410 | [
"BSD-3-Clause"
] | 2 | 2020-04-02T13:35:57.000Z | 2020-08-28T09:21:33.000Z | app/__init__.py | Shindler7/libpraks | c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410 | [
"BSD-3-Clause"
] | 4 | 2020-04-12T17:37:25.000Z | 2022-01-13T02:49:04.000Z | app/__init__.py | Shindler7/libpraks | c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410 | [
"BSD-3-Clause"
] | 4 | 2020-04-01T14:11:50.000Z | 2020-05-10T19:20:03.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from flask import Flask
from flask_images import Images
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy as SQLA
from flask_sslify import SSLify
from flask_wtf.csrf import CSRFProtect
from flask_cors import CORS
from config import Config
logging.basicConfig(
level=logging.DEBUG,
filename='app/logs/get_screen.log',
filemode='a'
)
# Flask
application = Flask(__name__)
application.config.from_object(Config)
# Подключение (обслуживание) SSL
sslify = SSLify(application)
# CORS
CORS(application)
# SQLAlchemy + Migrate
db_lib = SQLA(application)
migrate = Migrate(application, db_lib)
# Login
login_manager = LoginManager(application)
login_manager.login_view = 'login'
# CSRF
csrf = CSRFProtect(application)
# Flask image
images = Images(application)
from app import views # noqa
from app import viewsfuture # noqa
from app import admin # noqa
from .api import ver_one # noqa
if __name__ == "__main__":
application.run(host='0.0.0.0', port=5000)
| 20.407407 | 47 | 0.768603 | import logging
from flask import Flask
from flask_images import Images
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy as SQLA
from flask_sslify import SSLify
from flask_wtf.csrf import CSRFProtect
from flask_cors import CORS
from config import Config
logging.basicConfig(
level=logging.DEBUG,
filename='app/logs/get_screen.log',
filemode='a'
)
application = Flask(__name__)
application.config.from_object(Config)
sslify = SSLify(application)
CORS(application)
db_lib = SQLA(application)
migrate = Migrate(application, db_lib)
login_manager = LoginManager(application)
login_manager.login_view = 'login'
csrf = CSRFProtect(application)
images = Images(application)
from app import views
from app import viewsfuture
from app import admin
from .api import ver_one
if __name__ == "__main__":
application.run(host='0.0.0.0', port=5000)
| true | true |
1c4ab5d0b3ed0c4de6caa36c5588fd3dd0ac2b72 | 8,052 | py | Python | e2e/test_e2e.py | bentobox-dev/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
] | 1 | 2021-01-02T02:50:15.000Z | 2021-01-02T02:50:15.000Z | e2e/test_e2e.py | joeltio/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
] | 48 | 2020-10-21T07:42:30.000Z | 2021-02-15T19:34:55.000Z | e2e/test_e2e.py | joeltio/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
] | null | null | null | #
# bento-box
# E2E Test
#
import pytest
from git import Repo
from math import cos, sin
from bento import types
from bento.sim import Simulation
from bento.utils import to_yaml_proto
from bento.graph.plotter import Plotter
from bento.spec.ecs import EntityDef, ComponentDef
from bento.example.specs import Velocity, Position
# define test components
Meta = ComponentDef(
name="meta",
schema={
"name": types.string,
"id": types.int64,
"version": types.int32,
},
)
Movement = ComponentDef(
name="movement",
schema={
"rotation": types.float32,
"speed": types.float64,
},
)
Keyboard = ComponentDef(
name="keyboard",
schema={
"up": types.boolean,
"down": types.boolean,
"left": types.boolean,
"right": types.boolean,
},
)
@pytest.fixture
def sim(client):
"""Applies the test Simulation to the Engine"""
sim = Simulation(
name="driving_sim",
components=[Keyboard, Movement, Velocity, Position, Meta],
entities=[
EntityDef(components=[Keyboard]),
EntityDef(components=[Movement, Velocity, Position, Meta]),
],
client=client,
)
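    # ECS layout: one entity carries only the Keyboard input component, while the other
    # represents the car via the Movement, Velocity, Position and Meta components
    # (see the init and system graphs defined below)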
@sim.init
def init_sim(g: Plotter):
controls = g.entity(components=[Keyboard])
controls[Keyboard].left = False
controls[Keyboard].right = False
controls[Keyboard].up = False
controls[Keyboard].down = False
car = g.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "beetle"
car[Meta].id = 512
car[Meta].version = 2
car[Movement].speed = 0.0
car[Movement].rotation = 90.0
car[Velocity].x = 0.0
car[Velocity].y = 0.0
car[Position].x = 0.0
car[Position].y = 0.0
@sim.system
def control_sys(g: Plotter):
controls = g.entity(components=[Keyboard])
car = g.entity(components=[Movement, Velocity, Position, Meta])
acceleration, max_speed, steer_rate = 5.0, 18.0, 10.0
# steer car
if controls[Keyboard].left:
car[Movement].rotation -= steer_rate
controls[Keyboard].left = False
elif controls[Keyboard].right:
car[Movement].rotation += steer_rate
controls[Keyboard].right = False
# accelerate/slow down car
if controls[Keyboard].up:
car[Movement].speed = g.min(car[Movement].speed + acceleration, max_speed)
controls[Keyboard].up = False
elif controls[Keyboard].down:
car[Movement].speed = g.max(car[Movement].speed - acceleration, 0.0)
controls[Keyboard].down = False
@sim.system
def physics_sys(g: Plotter):
# compute velocity from car's rotation and speed
car = g.entity(components=[Movement, Velocity, Position, Meta])
# rotation
heading_x, heading_y = g.cos(car[Movement].rotation), -g.sin(
car[Movement].rotation
)
# speed
car[Velocity].x = car[Movement].speed * heading_x
car[Velocity].y = car[Movement].speed * heading_y
# update car position based on current velocity
car[Position].x += car[Velocity].x
car[Position].y += car[Velocity].y
sim.start()
return sim
def test_e2e_sim_get_version(client):
# e2e test that we can obtain sim/engine's version via SDK
repo = Repo(search_parent_directories=True)
assert client.get_version() == repo.head.object.hexsha
def test_e2e_sim_apply_sim(sim):
# check the sim's entities have populated ids
assert len([e.id for e in sim.entities if e.id != 0]) == len(sim.entities)
def test_e2e_sim_list_sims(sim, client):
# check that sim is listed
assert client.list_sims()[0] == sim.name
def test_e2e_sim_get_sim(sim, client):
# check that sim's can be retrieved by name
applied_proto = client.get_sim(sim.name)
assert to_yaml_proto(applied_proto) == to_yaml_proto(sim.build())
# test error handling when getting nonexistent sim
has_error = False
try:
client.get_sim("not_found")
except LookupError:
has_error = True
assert has_error
def test_e2e_sim_remove(sim, client):
# test removing simulations
client.remove_sim(sim.name)
assert len(client.list_sims()) == 0
def test_e2e_sim_get_set_attr(sim, client):
# test setting/setting attributes for every primitive data type
controls = sim.entity(components=[Keyboard])
controls[Keyboard].left = True
assert controls[Keyboard].left == True
car = sim.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "sedan"
assert car[Meta].name == "sedan"
car[Meta].version = 10
assert car[Meta].version == 10
car[Movement].rotation = -134.2
# rounding required due to loss of precision when using float32
assert round(car[Movement].rotation, 4) == -134.2
car[Movement].speed = 23.5
assert car[Movement].speed == 23.5
def test_e2e_engine_implict_type_convert(sim, client):
# test implicit type conversion
car = sim.entity(components=[Movement, Velocity, Position, Meta])
controls = sim.entity(components=[Keyboard])
# setup test values to attributes
car[Meta].id = 1
car[Meta].version = 1
car[Movement].speed = 1.0
car[Movement].rotation = 1.0
# test implicit type conversion with combinations of numeric data types
    # numeric data type => (lambda to get the attribute) with that data type
dtype_attrs = {
"types.int64": (lambda: car[Meta].id),
"types.int32": (lambda: car[Meta].version),
"types.float64": (lambda: car[Movement].speed),
"types.float32": (lambda: car[Movement].rotation),
}
for dtype in dtype_attrs.keys():
other_dtypes = [t for t in dtype_attrs.keys() if t != dtype]
for other_dtype in other_dtypes:
value_attr = dtype_attrs[other_dtype]
if dtype == "types.int64":
car[Meta].id = value_attr()
elif dtype == "types.int32":
car[Meta].version = value_attr()
elif dtype == "types.float64":
car[Movement].speed = value_attr()
elif dtype == "types.float32":
car[Movement].rotation = value_attr()
else:
raise ValueError(f"Data type case not handled: {dtype}")
actual_attr = dtype_attrs[dtype]
assert actual_attr() == 1
def test_e2e_sim_step(sim, client):
    # TODO: revisit this test once https://github.com/joeltio/bento-box/issues/34 is fixed.
# test init
sim.step()
# check that values are set correctly by init graph
controls = sim.entity(components=[Keyboard])
assert controls[Keyboard].left == False
assert controls[Keyboard].right == False
assert controls[Keyboard].up == False
assert controls[Keyboard].left == False
car = sim.entity(components=[Movement, Velocity, Position, Meta])
assert car[Meta].name == "beetle"
assert car[Meta].version == 2
assert car[Meta].id == 512
assert car[Movement].speed == 0.0
assert car[Movement].rotation == 90.0
assert car[Velocity].x == 0.0
assert car[Velocity].y == 0.0
assert car[Position].x == 0.0
assert car[Position].y == 0.0
# test running simulation for one step
controls[Keyboard].up = True
controls[Keyboard].left = True
sim.step()
# test attributes have been updated by system
assert controls[Keyboard].left == False
assert controls[Keyboard].up == False
assert car[Movement].speed == 5
assert car[Movement].rotation == 80
    # test running the simulation for one more step to exercise the other conditional branch
controls[Keyboard].down = True
controls[Keyboard].right = True
sim.step()
# test attributes have been updated by system
assert controls[Keyboard].down == False
assert controls[Keyboard].right == False
assert car[Movement].speed == 0
assert car[Movement].rotation == 90
| 31.453125 | 88 | 0.638847 |
import pytest
from git import Repo
from math import cos, sin
from bento import types
from bento.sim import Simulation
from bento.utils import to_yaml_proto
from bento.graph.plotter import Plotter
from bento.spec.ecs import EntityDef, ComponentDef
from bento.example.specs import Velocity, Position
Meta = ComponentDef(
name="meta",
schema={
"name": types.string,
"id": types.int64,
"version": types.int32,
},
)
Movement = ComponentDef(
name="movement",
schema={
"rotation": types.float32,
"speed": types.float64,
},
)
Keyboard = ComponentDef(
name="keyboard",
schema={
"up": types.boolean,
"down": types.boolean,
"left": types.boolean,
"right": types.boolean,
},
)
@pytest.fixture
def sim(client):
sim = Simulation(
name="driving_sim",
components=[Keyboard, Movement, Velocity, Position, Meta],
entities=[
EntityDef(components=[Keyboard]),
EntityDef(components=[Movement, Velocity, Position, Meta]),
],
client=client,
)
@sim.init
def init_sim(g: Plotter):
controls = g.entity(components=[Keyboard])
controls[Keyboard].left = False
controls[Keyboard].right = False
controls[Keyboard].up = False
controls[Keyboard].down = False
car = g.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "beetle"
car[Meta].id = 512
car[Meta].version = 2
car[Movement].speed = 0.0
car[Movement].rotation = 90.0
car[Velocity].x = 0.0
car[Velocity].y = 0.0
car[Position].x = 0.0
car[Position].y = 0.0
@sim.system
def control_sys(g: Plotter):
controls = g.entity(components=[Keyboard])
car = g.entity(components=[Movement, Velocity, Position, Meta])
acceleration, max_speed, steer_rate = 5.0, 18.0, 10.0
if controls[Keyboard].left:
car[Movement].rotation -= steer_rate
controls[Keyboard].left = False
elif controls[Keyboard].right:
car[Movement].rotation += steer_rate
controls[Keyboard].right = False
if controls[Keyboard].up:
car[Movement].speed = g.min(car[Movement].speed + acceleration, max_speed)
controls[Keyboard].up = False
elif controls[Keyboard].down:
car[Movement].speed = g.max(car[Movement].speed - acceleration, 0.0)
controls[Keyboard].down = False
@sim.system
def physics_sys(g: Plotter):
car = g.entity(components=[Movement, Velocity, Position, Meta])
# rotation
heading_x, heading_y = g.cos(car[Movement].rotation), -g.sin(
car[Movement].rotation
)
# speed
car[Velocity].x = car[Movement].speed * heading_x
car[Velocity].y = car[Movement].speed * heading_y
# update car position based on current velocity
car[Position].x += car[Velocity].x
car[Position].y += car[Velocity].y
sim.start()
return sim
def test_e2e_sim_get_version(client):
# e2e test that we can obtain sim/engine's version via SDK
repo = Repo(search_parent_directories=True)
assert client.get_version() == repo.head.object.hexsha
def test_e2e_sim_apply_sim(sim):
assert len([e.id for e in sim.entities if e.id != 0]) == len(sim.entities)
def test_e2e_sim_list_sims(sim, client):
# check that sim is listed
assert client.list_sims()[0] == sim.name
def test_e2e_sim_get_sim(sim, client):
# check that sim's can be retrieved by name
applied_proto = client.get_sim(sim.name)
assert to_yaml_proto(applied_proto) == to_yaml_proto(sim.build())
has_error = False
try:
client.get_sim("not_found")
except LookupError:
has_error = True
assert has_error
def test_e2e_sim_remove(sim, client):
client.remove_sim(sim.name)
assert len(client.list_sims()) == 0
def test_e2e_sim_get_set_attr(sim, client):
controls = sim.entity(components=[Keyboard])
controls[Keyboard].left = True
assert controls[Keyboard].left == True
car = sim.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "sedan"
assert car[Meta].name == "sedan"
car[Meta].version = 10
assert car[Meta].version == 10
car[Movement].rotation = -134.2
assert round(car[Movement].rotation, 4) == -134.2
car[Movement].speed = 23.5
assert car[Movement].speed == 23.5
def test_e2e_engine_implict_type_convert(sim, client):
car = sim.entity(components=[Movement, Velocity, Position, Meta])
controls = sim.entity(components=[Keyboard])
car[Meta].id = 1
car[Meta].version = 1
car[Movement].speed = 1.0
car[Movement].rotation = 1.0
dtype_attrs = {
"types.int64": (lambda: car[Meta].id),
"types.int32": (lambda: car[Meta].version),
"types.float64": (lambda: car[Movement].speed),
"types.float32": (lambda: car[Movement].rotation),
}
for dtype in dtype_attrs.keys():
other_dtypes = [t for t in dtype_attrs.keys() if t != dtype]
for other_dtype in other_dtypes:
value_attr = dtype_attrs[other_dtype]
if dtype == "types.int64":
car[Meta].id = value_attr()
elif dtype == "types.int32":
car[Meta].version = value_attr()
elif dtype == "types.float64":
car[Movement].speed = value_attr()
elif dtype == "types.float32":
car[Movement].rotation = value_attr()
else:
raise ValueError(f"Data type case not handled: {dtype}")
actual_attr = dtype_attrs[dtype]
assert actual_attr() == 1
def test_e2e_sim_step(sim, client):
sim.step()
controls = sim.entity(components=[Keyboard])
assert controls[Keyboard].left == False
assert controls[Keyboard].right == False
assert controls[Keyboard].up == False
assert controls[Keyboard].left == False
car = sim.entity(components=[Movement, Velocity, Position, Meta])
assert car[Meta].name == "beetle"
assert car[Meta].version == 2
assert car[Meta].id == 512
assert car[Movement].speed == 0.0
assert car[Movement].rotation == 90.0
assert car[Velocity].x == 0.0
assert car[Velocity].y == 0.0
assert car[Position].x == 0.0
assert car[Position].y == 0.0
controls[Keyboard].up = True
controls[Keyboard].left = True
sim.step()
assert controls[Keyboard].left == False
assert controls[Keyboard].up == False
assert car[Movement].speed == 5
assert car[Movement].rotation == 80
controls[Keyboard].down = True
controls[Keyboard].right = True
sim.step()
assert controls[Keyboard].down == False
assert controls[Keyboard].right == False
assert car[Movement].speed == 0
assert car[Movement].rotation == 90
| true | true |
1c4ab5ee42184750c6098b8611c188c2f7b936ac | 7,817 | py | Python | lib/python/treadmill/cli/show.py | drienyov/treadmill | ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de | [
"Apache-2.0"
] | 2 | 2017-10-31T18:48:20.000Z | 2018-03-04T20:35:20.000Z | lib/python/treadmill/cli/show.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | lib/python/treadmill/cli/show.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | """Manage Treadmill app manifest.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from six.moves import urllib_parse
from treadmill import cli
from treadmill import restclient
from treadmill import context
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
_FINISHED_STATES = ['finished', 'aborted', 'killed', 'terminated']
_STATE_FORMATTER = cli.make_formatter('instance-state')
_FINISHED_STATE_FORMATTER = cli.make_formatter('instance-finished-state')
_ENDPOINT_FORMATTER = cli.make_formatter('endpoint')
_APP_FORMATTER = cli.make_formatter('app')
def _get_state(apis, match=None, finished=False, partition=None):
"""Get cell state."""
url = '/state/'
query = {}
if match:
query['match'] = match
if finished:
query['finished'] = 'true'
if partition:
query['partition'] = partition
if query:
url += '?' + urllib_parse.urlencode(query)
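    # e.g. match='proid.myapp' with finished=True and no partition yields
    # '/state/?match=proid.myapp&finished=true' (hypothetical values for illustration)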
response = restclient.get(apis, url)
return response.json()
def _show_state(apis, match=None, finished=False, partition=None):
"""Show cell state."""
state = _get_state(apis, match, finished, partition)
cli.out(_STATE_FORMATTER(state))
def _show_finished(apis, match=None, partition=None):
state = _get_state(apis, match=match, finished=True, partition=partition)
result = []
for item in state:
if item['state'] not in _FINISHED_STATES:
continue
details = None
if item.get('exitcode') is not None:
details = 'return code: {}'.format(item['exitcode'])
if item.get('signal') is not None:
details = 'signal: {}'.format(utils.signal2name(item['signal']))
if item.get('aborted_reason'):
details = 'reason: {}'.format(item['aborted_reason'])
if item.get('terminated_reason'):
details = 'reason: {}'.format(item['terminated_reason'])
if item.get('oom'):
details = 'out of memory'
result.append({
'name': item['name'],
'state': item['state'],
'host': item['host'],
'when': utils.strftime_utc(item['when']),
'details': details,
})
cli.out(_FINISHED_STATE_FORMATTER(result))
def _show_list(apis, match, states, finished=False, partition=None):
"""Show list of instnces in given state."""
state = _get_state(apis, match, finished, partition)
names = [item['name'] for item in state if item['state'] in states]
for name in names:
cli.out(name)
def _show_endpoints(apis, pattern, endpoint, proto):
"""Show cell endpoints."""
url = '/endpoint/%s' % urllib_parse.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
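    # e.g. pattern='proid.myapp', endpoint='http', proto=None builds
    # '/endpoint/proid.myapp/*/http' (hypothetical values for illustration)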
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port']),
'state': end.get('state')
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
"""Show instance manifest."""
url = '/instance/%s' % urllib_parse.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
"""Return top level command handler."""
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
"""Show state of scheduled applications."""
ctx['api'] = api
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--finished', is_flag=True, default=False,
help='Show finished instances.')
@click.option('--partition', help='Filter apps by partition')
def state(match, finished, partition):
"""Show state of Treadmill scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match, finished, partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def pending(match, partition):
"""Show pending instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def running(match, partition):
"""Show running instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
@click.option('--details', is_flag=True, default=False,
help='Show details.')
def finished(match, partition, details):
"""Show finished instances."""
apis = context.GLOBAL.state_api(ctx['api'])
if details:
return _show_finished(apis, match, partition)
return _show_list(
apis, match, _FINISHED_STATES, finished=True, partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def scheduled(match, partition):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis, match, ['running', 'scheduled'], partition=partition
)
@show.command(name='all')
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def _all(match, partition):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis,
match,
['pending', 'running', 'scheduled'],
partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
"""Show application endpoints."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('instance_id')
def instance(instance_id):
"""Show scheduled instance manifest."""
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del finished
del instance
del state
del endpoints
return show
| 32.301653 | 77 | 0.63541 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from six.moves import urllib_parse
from treadmill import cli
from treadmill import restclient
from treadmill import context
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
_FINISHED_STATES = ['finished', 'aborted', 'killed', 'terminated']
_STATE_FORMATTER = cli.make_formatter('instance-state')
_FINISHED_STATE_FORMATTER = cli.make_formatter('instance-finished-state')
_ENDPOINT_FORMATTER = cli.make_formatter('endpoint')
_APP_FORMATTER = cli.make_formatter('app')
def _get_state(apis, match=None, finished=False, partition=None):
url = '/state/'
query = {}
if match:
query['match'] = match
if finished:
query['finished'] = 'true'
if partition:
query['partition'] = partition
if query:
url += '?' + urllib_parse.urlencode(query)
response = restclient.get(apis, url)
return response.json()
def _show_state(apis, match=None, finished=False, partition=None):
state = _get_state(apis, match, finished, partition)
cli.out(_STATE_FORMATTER(state))
def _show_finished(apis, match=None, partition=None):
state = _get_state(apis, match=match, finished=True, partition=partition)
result = []
for item in state:
if item['state'] not in _FINISHED_STATES:
continue
details = None
if item.get('exitcode') is not None:
details = 'return code: {}'.format(item['exitcode'])
if item.get('signal') is not None:
details = 'signal: {}'.format(utils.signal2name(item['signal']))
if item.get('aborted_reason'):
details = 'reason: {}'.format(item['aborted_reason'])
if item.get('terminated_reason'):
details = 'reason: {}'.format(item['terminated_reason'])
if item.get('oom'):
details = 'out of memory'
result.append({
'name': item['name'],
'state': item['state'],
'host': item['host'],
'when': utils.strftime_utc(item['when']),
'details': details,
})
cli.out(_FINISHED_STATE_FORMATTER(result))
def _show_list(apis, match, states, finished=False, partition=None):
state = _get_state(apis, match, finished, partition)
names = [item['name'] for item in state if item['state'] in states]
for name in names:
cli.out(name)
def _show_endpoints(apis, pattern, endpoint, proto):
url = '/endpoint/%s' % urllib_parse.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port']),
'state': end.get('state')
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
url = '/instance/%s' % urllib_parse.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
ctx['api'] = api
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--finished', is_flag=True, default=False,
help='Show finished instances.')
@click.option('--partition', help='Filter apps by partition')
def state(match, finished, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match, finished, partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def pending(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def running(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
@click.option('--details', is_flag=True, default=False,
help='Show details.')
def finished(match, partition, details):
apis = context.GLOBAL.state_api(ctx['api'])
if details:
return _show_finished(apis, match, partition)
return _show_list(
apis, match, _FINISHED_STATES, finished=True, partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def scheduled(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis, match, ['running', 'scheduled'], partition=partition
)
@show.command(name='all')
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def _all(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis,
match,
['pending', 'running', 'scheduled'],
partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('instance_id')
def instance(instance_id):
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del finished
del instance
del state
del endpoints
return show
| true | true |
1c4ab644d7926e6cc00e6416eef269b9cd3f9640 | 7,055 | py | Python | docs/source/conf.py | dsjoerg/sc2reader | adeb6e3da80e57974b1a29b20e80a02411e693e2 | [
"MIT"
] | 2 | 2016-05-31T14:50:47.000Z | 2021-11-04T20:03:19.000Z | docs/source/conf.py | dsjoerg/sc2reader | adeb6e3da80e57974b1a29b20e80a02411e693e2 | [
"MIT"
] | null | null | null | docs/source/conf.py | dsjoerg/sc2reader | adeb6e3da80e57974b1a29b20e80a02411e693e2 | [
"MIT"
] | 2 | 2017-01-28T09:09:47.000Z | 2017-09-14T14:29:20.000Z | # -*- coding: utf-8 -*-
#
# sc2reader documentation build configuration file, created by
# sphinx-quickstart on Sun May 01 12:39:48 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sc2reader'
copyright = u'2011'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sc2readerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sc2reader.tex', u'sc2reader Documentation',
u'Graylin Kim, Bas Peschier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sc2reader', u'sc2reader Documentation',
[u'Graylin Kim, Bas Peschier'], 1)
]
| 32.511521 | 80 | 0.71949 |
import sys, os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'sc2reader'
copyright = u'2011'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sc2readerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sc2reader.tex', u'sc2reader Documentation',
u'Graylin Kim, Bas Peschier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sc2reader', u'sc2reader Documentation',
[u'Graylin Kim, Bas Peschier'], 1)
]
| true | true |
1c4ab7312d9e6256798662f4b5f774925bdfd988 | 563 | py | Python | HostsTool/gui/__init__.py | zte-lhg/chromium_org | 6174180179b3c6b71c2d93df68e734cadf6d8d49 | [
"Apache-2.0"
] | null | null | null | HostsTool/gui/__init__.py | zte-lhg/chromium_org | 6174180179b3c6b71c2d93df68e734cadf6d8d49 | [
"Apache-2.0"
] | null | null | null | HostsTool/gui/__init__.py | zte-lhg/chromium_org | 6174180179b3c6b71c2d93df68e734cadf6d8d49 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py : Declare modules to be called in gui module.
#
# Copyleft (C) 2014 - huhamhire <[email protected]>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# =====================================================================
from hostsutil import HostsUtil
__all__ = ["HostsUtil"]
| 35.1875 | 71 | 0.53286 |
from hostsutil import HostsUtil
__all__ = ["HostsUtil"]
| true | true |
1c4ab74f30c876baed75a6ac1163521f349e3b56 | 3,383 | py | Python | train.py | haziq9978/PythonChatbot | 8eb77140b32a4c6770dab20d4e26be03504ac5ee | [
"MIT"
] | 2 | 2021-01-04T16:23:07.000Z | 2021-01-05T03:25:19.000Z | train.py | haziq9978/PythonChatbot | 8eb77140b32a4c6770dab20d4e26be03504ac5ee | [
"MIT"
] | null | null | null | train.py | haziq9978/PythonChatbot | 8eb77140b32a4c6770dab20d4e26be03504ac5ee | [
"MIT"
] | 1 | 2021-01-04T16:28:57.000Z | 2021-01-04T16:28:57.000Z | import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('dataCombine.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
# loop through each sentence in our intents patterns
for intent in intents['intents']:
tag = intent['tag']
# add to tag list
tags.append(tag)
for pattern in intent['patterns']:
# tokenize each word in the sentence
w = tokenize(pattern)
# add to our words list
all_words.extend(w)
# add to xy pair
xy.append((w, tag))
# stem and lower each word
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
# X: bag of words for each pattern_sentence
bag = bag_of_words(pattern_sentence, all_words)
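    # e.g. (assuming the usual 0/1 encoding in nltk_utils.bag_of_words)
    # all_words = ["hi", "how", "are", "you"], tokens = ["how", "you"] -> bag = [0, 1, 0, 1]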
X_train.append(bag)
# y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
# support indexing such that dataset[i] can be used to get i-th sample
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
# we can call len(dataset) to return the size
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
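# note: nn.CrossEntropyLoss expects raw logits from the model and integer class labels,
# which is why y_train stores tag indices instead of one-hot vectors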
# Train the model
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
# Forward pass
outputs = model(words)
        # if y were one-hot encoded, we would first have to recover class indices via
        # labels = torch.max(labels, 1)[1]
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 10 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.7f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| 26.023077 | 74 | 0.657996 | import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('dataCombine.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
for intent in intents['intents']:
tag = intent['tag']
tags.append(tag)
for pattern in intent['patterns']:
w = tokenize(pattern)
all_words.extend(w)
xy.append((w, tag))
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
outputs = model(words)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 10 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.7f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| true | true |
1c4ab7a54084ae0c5afba16f8fe6659ad81d8e17 | 3,924 | py | Python | tools/similarity.py | whxf/nlp_api | a63b67287e9a90381cac14bb1c5b723ccbeb14a3 | [
"MIT"
] | 13 | 2019-11-06T02:37:28.000Z | 2022-03-21T06:16:18.000Z | tools/similarity.py | whxf/nlp_api | a63b67287e9a90381cac14bb1c5b723ccbeb14a3 | [
"MIT"
] | null | null | null | tools/similarity.py | whxf/nlp_api | a63b67287e9a90381cac14bb1c5b723ccbeb14a3 | [
"MIT"
] | 4 | 2020-05-07T10:49:32.000Z | 2021-12-31T04:03:19.000Z | """
@author: Li Xi
@file: similarity.py
@time: 2019/10/30 15:37
@desc:
Text similarity computation:
1. WordMoverDistance  document-level similarity based on Word Mover's Distance
2. WordVectorSimilarity  sentence-level similarity based on word vectors
Notes:
* Both methods take the word-segmented (tokenized) sentence as input; the type must be a list
* For better efficiency/quality the token lists can be pre-processed, e.g. stop words removed
* See the bottom of this file for concrete usage
* A custom word-vector file can be loaded instead of the default one
"""
import os
import gensim
import numpy as np
from tools.segment import LtpSegment
class WordMoverDistance(object):
"""词移距离 Word Mover's Distance"""
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
word2vec_model.init_sims(replace=True) # normalizes vectors
def distance(self, tokens1, tokens2):
"""
计算词移距离
!!!: 这里需要输入句子的分词后结果
:param tokens1: [list]
:param tokens2: [list]
:return: score 值
"""
distance = self.word2vec_model.wmdistance(tokens1, tokens2)
return distance
class WordVectorSimilarity(object):
"""
    Sentence similarity based on word vectors (cosine similarity).
    !!!: character vectors can be used here just as well as word vectors
"""
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
def __init__(self, vector_dim=300):
"""
        :param vector_dim: dimension of the word vectors
"""
self.vector_dim = vector_dim
def get_word_vector(self, word):
"""
        Get the vector of a word; if the word is not found, return an all-zero embedding.
:param word:
:return:
"""
try:
return self.word2vec_model[word]
except:
return np.zeros(self.vector_dim)
def similarity_cosine(self, tokens1, tokens2):
"""
        Compute the cosine similarity of two sentences, where a sentence vector is the
        average of its token vectors.
        !!!: the inputs must be the tokenized (word-segmented) sentences
:param tokens1:
:param tokens2:
:return:
"""
        # build the vector representation of sentence1
sentence1 = np.zeros(self.vector_dim)
for _token in tokens1:
sentence1 += self.get_word_vector(_token)
sentence1 = sentence1 / len(tokens1)
        # build the vector representation of sentence2
sentence2 = np.zeros(self.vector_dim)
for _token in tokens2:
sentence2 += self.get_word_vector(_token)
sentence2 = sentence2 / len(tokens2)
        # cosine similarity formula: sim = sum(a*b) / ( sqrt(sum(a^2)) * sqrt(sum(b^2)) )
cos1 = np.sum(sentence1 * sentence2)
cos21 = np.sqrt(sum(sentence1 ** 2))
cos22 = np.sqrt(sum(sentence2 ** 2))
similarity = cos1 / float(cos21 * cos22)
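        # note: if either sentence has no in-vocabulary tokens its vector stays all-zero,
        # so this division produces nan; callers may want to guard against that case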
return similarity
def distance(self, tokens1, tokens2):
"""
        Compute the WordVectorSimilarity (cosine similarity) of two token lists.
        !!!: the inputs must be the tokenized (word-segmented) sentences
:param tokens1:
:param tokens2:
:return:
"""
return self.similarity_cosine(tokens1, tokens2)
if __name__ == "__main__":
# -------- Begin WordMoverDistance Test --------
    # initialize WordMoverDistance
sim = WordMoverDistance()
    # initialize LTP for word segmentation
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") # 分词结果为list
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
    # similarity: 0.5040331478972442
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
    # similarity: 0.8857186341563674
# -------- End WordMoverDistance Test --------
# -------- Begin WordVectorSimilarity Test --------
    # initialize WordVectorSimilarity
sim = WordVectorSimilarity()
    # initialize LTP for word segmentation
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") # 分词结果为list
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
    # similarity: 0.9048935250581785
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
    # similarity: 0.812708497722071
# -------- End WordVectorSimilarity Test --------
| 28.230216 | 83 | 0.622579 | import os
import gensim
import numpy as np
from tools.segment import LtpSegment
class WordMoverDistance(object):
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
word2vec_model.init_sims(replace=True)
def distance(self, tokens1, tokens2):
distance = self.word2vec_model.wmdistance(tokens1, tokens2)
return distance
class WordVectorSimilarity(object):
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
def __init__(self, vector_dim=300):
self.vector_dim = vector_dim
def get_word_vector(self, word):
try:
return self.word2vec_model[word]
except:
return np.zeros(self.vector_dim)
def similarity_cosine(self, tokens1, tokens2):
sentence1 = np.zeros(self.vector_dim)
for _token in tokens1:
sentence1 += self.get_word_vector(_token)
sentence1 = sentence1 / len(tokens1)
sentence2 = np.zeros(self.vector_dim)
for _token in tokens2:
sentence2 += self.get_word_vector(_token)
sentence2 = sentence2 / len(tokens2)
cos1 = np.sum(sentence1 * sentence2)
cos21 = np.sqrt(sum(sentence1 ** 2))
cos22 = np.sqrt(sum(sentence2 ** 2))
similarity = cos1 / float(cos21 * cos22)
return similarity
def distance(self, tokens1, tokens2):
return self.similarity_cosine(tokens1, tokens2)
if __name__ == "__main__":
sim = WordMoverDistance()
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
sim = WordVectorSimilarity()
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
| true | true |
1c4ab877b9f249b4301cd5d7ac6137a0a46850c9 | 1,339 | py | Python | tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 1 | 2021-01-20T05:49:13.000Z | 2021-01-20T05:49:13.000Z | tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 1 | 2020-11-20T18:21:24.000Z | 2020-11-20T18:21:37.000Z | tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 2 | 2020-11-20T18:17:31.000Z | 2021-01-20T14:58:22.000Z | from typing import Tuple
from hypothesis import given
from tests.integration_tests.utils import (
BoundPortedBoundsListsPair,
BoundPortedRingManagersPair,
are_bound_ported_bounds_lists_equal,
are_bound_ported_ring_managers_equal)
from tests.utils import equivalence
from wagyu.hints import Coordinate
from . import strategies
@given(strategies.ring_managers_pairs,
strategies.coordinates,
strategies.non_empty_initialized_bounds_lists_pairs_indices)
def test_basic(pair: BoundPortedRingManagersPair,
top_y: Coordinate,
active_bounds_pair_index: Tuple[BoundPortedBoundsListsPair, int]
) -> None:
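    # the property under test: the two implementations behind the bound/ported pair
    # should agree on the returned flag, the resulting index, and the mutated
    # active-bounds lists for any generated input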
bound, ported = pair
(bound_active_bounds,
ported_active_bounds), index = active_bounds_pair_index
(bound_active_bounds, bound_index,
bound_result) = bound.horizontals_at_top_scanbeam(
top_y, bound_active_bounds, index)
ported_index, ported_result = ported.horizontals_at_top_scanbeam(
top_y, ported_active_bounds, index)
assert equivalence(bound_result, ported_result)
assert bound_index == ported_index
assert are_bound_ported_bounds_lists_equal(bound_active_bounds,
ported_active_bounds)
assert are_bound_ported_ring_managers_equal(bound, ported)
| 36.189189 | 79 | 0.746826 | from typing import Tuple
from hypothesis import given
from tests.integration_tests.utils import (
BoundPortedBoundsListsPair,
BoundPortedRingManagersPair,
are_bound_ported_bounds_lists_equal,
are_bound_ported_ring_managers_equal)
from tests.utils import equivalence
from wagyu.hints import Coordinate
from . import strategies
@given(strategies.ring_managers_pairs,
strategies.coordinates,
strategies.non_empty_initialized_bounds_lists_pairs_indices)
def test_basic(pair: BoundPortedRingManagersPair,
top_y: Coordinate,
active_bounds_pair_index: Tuple[BoundPortedBoundsListsPair, int]
) -> None:
bound, ported = pair
(bound_active_bounds,
ported_active_bounds), index = active_bounds_pair_index
(bound_active_bounds, bound_index,
bound_result) = bound.horizontals_at_top_scanbeam(
top_y, bound_active_bounds, index)
ported_index, ported_result = ported.horizontals_at_top_scanbeam(
top_y, ported_active_bounds, index)
assert equivalence(bound_result, ported_result)
assert bound_index == ported_index
assert are_bound_ported_bounds_lists_equal(bound_active_bounds,
ported_active_bounds)
assert are_bound_ported_ring_managers_equal(bound, ported)
| true | true |
1c4ab8eaeea9b0696f105daca4407d3d104a98ea | 7,518 | py | Python | train_InfoGAN1.py | AnonymousExplorer/Conditional-GANs-Pytorch | 6c15ec67217156d6f041e34efe29ab62f9ef7c7d | [
"MIT"
] | 40 | 2018-12-11T02:14:19.000Z | 2022-03-19T06:16:26.000Z | train_InfoGAN1.py | AnonymousExplorer/Conditional-GANs-Pytorch | 6c15ec67217156d6f041e34efe29ab62f9ef7c7d | [
"MIT"
] | null | null | null | train_InfoGAN1.py | AnonymousExplorer/Conditional-GANs-Pytorch | 6c15ec67217156d6f041e34efe29ab62f9ef7c7d | [
"MIT"
] | 19 | 2019-03-21T19:11:14.000Z | 2022-01-17T05:54:13.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import model
import numpy as np
import pylib
import PIL.Image as Image
import tensorboardX
import torch
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as tforms
import torchlib
# ==============================================================================
# = param =
# ==============================================================================
# command line arguments
parser = argparse.ArgumentParser()
# model
parser.add_argument('--z_dim', dest='z_dim', type=int, default=100)
# training
parser.add_argument('--epoch', dest='epoch', type=int, default=50)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--d_learning_rate', dest='d_learning_rate', type=float, default=0.0002)
parser.add_argument('--g_learning_rate', dest='g_learning_rate', type=float, default=0.001)
parser.add_argument('--n_d', dest='n_d', type=int, help='# of d updates per g update', default=1)
parser.add_argument('--loss_mode', dest='loss_mode', choices=['gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'], default='hinge_v2')
parser.add_argument('--gp_mode', dest='gp_mode', choices=['none', 'dragan', 'wgan-gp'], default='none')
parser.add_argument('--gp_coef', dest='gp_coef', type=float, default=1.0)
parser.add_argument('--norm', dest='norm', choices=['none', 'batch_norm', 'instance_norm'], default='none')
parser.add_argument('--weight_norm', dest='weight_norm', choices=['none', 'spectral_norm', 'weight_norm'], default='spectral_norm')
# others
parser.add_argument('--experiment_name', dest='experiment_name', default='InfoGAN1_default')
# parse arguments
args = parser.parse_args()
# model
z_dim = args.z_dim
# training
epoch = args.epoch
batch_size = args.batch_size
d_learning_rate = args.d_learning_rate
g_learning_rate = args.g_learning_rate
n_d = args.n_d
loss_mode = args.loss_mode
gp_mode = args.gp_mode
gp_coef = args.gp_coef
norm = args.norm
weight_norm = args.weight_norm
# others
experiment_name = args.experiment_name
# save settings
pylib.mkdir('./output/%s' % experiment_name)
with open('./output/%s/setting.txt' % experiment_name, 'w') as f:
f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))
# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
c_dim = 10
# ==============================================================================
# = setting =
# ==============================================================================
# data
transform = tforms.Compose(
[tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),
tforms.ToTensor(),
tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),
tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)
train_loader = torch.utils.data.DataLoader(
dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=use_gpu,
drop_last=True
)
# model
D = model.DiscriminatorInfoGAN1(x_dim=3, c_dim=c_dim, norm=norm, weight_norm=weight_norm).to(device)
G = model.GeneratorInfoGAN1(z_dim=z_dim, c_dim=c_dim).to(device)
# gan loss function
d_loss_fn, g_loss_fn = model.get_losses_fn(loss_mode)
# optimizer
d_optimizer = torch.optim.Adam(D.parameters(), lr=d_learning_rate, betas=(0.5, 0.999))
g_optimizer = torch.optim.Adam(G.parameters(), lr=g_learning_rate, betas=(0.5, 0.999))
# ==============================================================================
# = train =
# ==============================================================================
# load checkpoint
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
try:
ckpt = torchlib.load_checkpoint(ckpt_dir)
start_ep = ckpt['epoch']
D.load_state_dict(ckpt['D'])
G.load_state_dict(ckpt['G'])
d_optimizer.load_state_dict(ckpt['d_optimizer'])
g_optimizer.load_state_dict(ckpt['g_optimizer'])
except:
print(' [*] No checkpoint!')
start_ep = 0
# writer
writer = tensorboardX.SummaryWriter('./output/%s/summaries' % experiment_name)
# run
z_sample = torch.randn(c_dim * 10, z_dim).to(device)
c_sample = torch.tensor(np.concatenate([np.eye(c_dim)] * 10), dtype=z_sample.dtype).to(device)
for ep in range(start_ep, epoch):
for i, (x, _) in enumerate(train_loader):
step = ep * len(train_loader) + i + 1
D.train()
G.train()
# train D and Q
x = x.to(device)
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
z = torch.randn(batch_size, z_dim).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
x_f = G(z, c).detach()
x_gan_logit, _ = D(x)
x_f_gan_logit, x_f_c_logit = D(x_f)
d_x_gan_loss, d_x_f_gan_loss = d_loss_fn(x_gan_logit, x_f_gan_logit)
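        # InfoGAN mutual-information term: the Q head must recover the sampled categorical code c from the fake image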
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
gp = model.gradient_penalty(D, x, x_f, mode=gp_mode)
d_loss = d_x_gan_loss + d_x_f_gan_loss + gp * gp_coef + d_x_f_c_logit
D.zero_grad()
d_loss.backward()
d_optimizer.step()
writer.add_scalar('D/d_gan_loss', (d_x_gan_loss + d_x_f_gan_loss).data.cpu().numpy(), global_step=step)
writer.add_scalar('D/d_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
writer.add_scalar('D/gp', gp.data.cpu().numpy(), global_step=step)
# train G
if step % n_d == 0:
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
z = torch.randn(batch_size, z_dim).to(device)
x_f = G(z, c)
x_f_gan_logit, x_f_c_logit = D(x_f)
g_gan_loss = g_loss_fn(x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
g_loss = g_gan_loss + d_x_f_c_logit
G.zero_grad()
g_loss.backward()
g_optimizer.step()
writer.add_scalar('G/g_gan_loss', g_gan_loss.data.cpu().numpy(), global_step=step)
writer.add_scalar('G/g_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
# display
if step % 1 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (ep, i + 1, len(train_loader)))
# sample
if step % 100 == 0:
G.eval()
x_f_sample = (G(z_sample, c_sample) + 1) / 2.0
save_dir = './output/%s/sample_training' % experiment_name
pylib.mkdir(save_dir)
torchvision.utils.save_image(x_f_sample, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, ep, i + 1, len(train_loader)), nrow=10)
torchlib.save_checkpoint({'epoch': ep + 1,
'D': D.state_dict(),
'G': G.state_dict(),
'd_optimizer': d_optimizer.state_dict(),
'g_optimizer': g_optimizer.state_dict()},
'%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),
max_keep=2)
| 38.357143 | 134 | 0.600958 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import model
import numpy as np
import pylib
import PIL.Image as Image
import tensorboardX
import torch
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as tforms
import torchlib
parser = argparse.ArgumentParser()
parser.add_argument('--z_dim', dest='z_dim', type=int, default=100)
parser.add_argument('--epoch', dest='epoch', type=int, default=50)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--d_learning_rate', dest='d_learning_rate', type=float, default=0.0002)
parser.add_argument('--g_learning_rate', dest='g_learning_rate', type=float, default=0.001)
parser.add_argument('--n_d', dest='n_d', type=int, help='# of d updates per g update', default=1)
parser.add_argument('--loss_mode', dest='loss_mode', choices=['gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'], default='hinge_v2')
parser.add_argument('--gp_mode', dest='gp_mode', choices=['none', 'dragan', 'wgan-gp'], default='none')
parser.add_argument('--gp_coef', dest='gp_coef', type=float, default=1.0)
parser.add_argument('--norm', dest='norm', choices=['none', 'batch_norm', 'instance_norm'], default='none')
parser.add_argument('--weight_norm', dest='weight_norm', choices=['none', 'spectral_norm', 'weight_norm'], default='spectral_norm')
parser.add_argument('--experiment_name', dest='experiment_name', default='InfoGAN1_default')
args = parser.parse_args()
z_dim = args.z_dim
epoch = args.epoch
batch_size = args.batch_size
d_learning_rate = args.d_learning_rate
g_learning_rate = args.g_learning_rate
n_d = args.n_d
loss_mode = args.loss_mode
gp_mode = args.gp_mode
gp_coef = args.gp_coef
norm = args.norm
weight_norm = args.weight_norm
experiment_name = args.experiment_name
pylib.mkdir('./output/%s' % experiment_name)
with open('./output/%s/setting.txt' % experiment_name, 'w') as f:
f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
c_dim = 10
transform = tforms.Compose(
[tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),
tforms.ToTensor(),
tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),
tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)
train_loader = torch.utils.data.DataLoader(
dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=use_gpu,
drop_last=True
)
D = model.DiscriminatorInfoGAN1(x_dim=3, c_dim=c_dim, norm=norm, weight_norm=weight_norm).to(device)
G = model.GeneratorInfoGAN1(z_dim=z_dim, c_dim=c_dim).to(device)
d_loss_fn, g_loss_fn = model.get_losses_fn(loss_mode)
d_optimizer = torch.optim.Adam(D.parameters(), lr=d_learning_rate, betas=(0.5, 0.999))
g_optimizer = torch.optim.Adam(G.parameters(), lr=g_learning_rate, betas=(0.5, 0.999))
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
try:
ckpt = torchlib.load_checkpoint(ckpt_dir)
start_ep = ckpt['epoch']
D.load_state_dict(ckpt['D'])
G.load_state_dict(ckpt['G'])
d_optimizer.load_state_dict(ckpt['d_optimizer'])
g_optimizer.load_state_dict(ckpt['g_optimizer'])
except:
print(' [*] No checkpoint!')
start_ep = 0
writer = tensorboardX.SummaryWriter('./output/%s/summaries' % experiment_name)
z_sample = torch.randn(c_dim * 10, z_dim).to(device)
c_sample = torch.tensor(np.concatenate([np.eye(c_dim)] * 10), dtype=z_sample.dtype).to(device)
for ep in range(start_ep, epoch):
for i, (x, _) in enumerate(train_loader):
step = ep * len(train_loader) + i + 1
D.train()
G.train()
x = x.to(device)
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
z = torch.randn(batch_size, z_dim).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
x_f = G(z, c).detach()
x_gan_logit, _ = D(x)
x_f_gan_logit, x_f_c_logit = D(x_f)
d_x_gan_loss, d_x_f_gan_loss = d_loss_fn(x_gan_logit, x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
gp = model.gradient_penalty(D, x, x_f, mode=gp_mode)
d_loss = d_x_gan_loss + d_x_f_gan_loss + gp * gp_coef + d_x_f_c_logit
D.zero_grad()
d_loss.backward()
d_optimizer.step()
writer.add_scalar('D/d_gan_loss', (d_x_gan_loss + d_x_f_gan_loss).data.cpu().numpy(), global_step=step)
writer.add_scalar('D/d_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
writer.add_scalar('D/gp', gp.data.cpu().numpy(), global_step=step)
if step % n_d == 0:
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
z = torch.randn(batch_size, z_dim).to(device)
x_f = G(z, c)
x_f_gan_logit, x_f_c_logit = D(x_f)
g_gan_loss = g_loss_fn(x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
g_loss = g_gan_loss + d_x_f_c_logit
G.zero_grad()
g_loss.backward()
g_optimizer.step()
writer.add_scalar('G/g_gan_loss', g_gan_loss.data.cpu().numpy(), global_step=step)
writer.add_scalar('G/g_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
if step % 1 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (ep, i + 1, len(train_loader)))
if step % 100 == 0:
G.eval()
x_f_sample = (G(z_sample, c_sample) + 1) / 2.0
save_dir = './output/%s/sample_training' % experiment_name
pylib.mkdir(save_dir)
torchvision.utils.save_image(x_f_sample, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, ep, i + 1, len(train_loader)), nrow=10)
torchlib.save_checkpoint({'epoch': ep + 1,
'D': D.state_dict(),
'G': G.state_dict(),
'd_optimizer': d_optimizer.state_dict(),
'g_optimizer': g_optimizer.state_dict()},
'%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),
max_keep=2)
| true | true |
1c4abab21c61cff4175293662effd5d6d19d1025 | 715 | py | Python | test.py | Yvictor/simdjson | 2e43ea714a75def3b55f0d6033acb36e31c6497b | [
"Apache-2.0"
] | null | null | null | test.py | Yvictor/simdjson | 2e43ea714a75def3b55f0d6033acb36e31c6497b | [
"Apache-2.0"
] | null | null | null | test.py | Yvictor/simdjson | 2e43ea714a75def3b55f0d6033acb36e31c6497b | [
"Apache-2.0"
] | null | null | null | import sjson
import json
import pytest
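# JSON documents covering numbers, null, booleans, strings, and nested arrays/objects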
test_case = [
'{"a": 1}',
'{"a": 1.1}',
'{"a": null}',
'{"a": "string test"}',
'{"a": true}',
'{"a": false}',
'{"a": 1, "b": 2}',
'{"a": 1, "b": 2, "c": 3}',
'{"a": 1, "b": 2, "c": 3, "d": 1.1}',
'{"a": [1, 1.1], "b": 2}',
'{"a": [1, 1.1], "b": {"nest": "a"} }',
'{"a": [1, 1.1], "b": {"nest": [1, 3, 5]} }',
'{"a": [1, 1.1], "b": {"nest": {"d": 1} } }',
'{"a": [1, 1.1], "b": {"nest": {"d": [1, 3, 2.1]} } }',
]
@pytest.mark.parametrize('json_string', test_case)
def test_json_loads(json_string):
assert sjson.loads(json_string) == json.loads(json_string)
if __name__ == "__main__":
pytest.main([__file__]) | 24.655172 | 62 | 0.418182 | import sjson
import json
import pytest
test_case = [
'{"a": 1}',
'{"a": 1.1}',
'{"a": null}',
'{"a": "string test"}',
'{"a": true}',
'{"a": false}',
'{"a": 1, "b": 2}',
'{"a": 1, "b": 2, "c": 3}',
'{"a": 1, "b": 2, "c": 3, "d": 1.1}',
'{"a": [1, 1.1], "b": 2}',
'{"a": [1, 1.1], "b": {"nest": "a"} }',
'{"a": [1, 1.1], "b": {"nest": [1, 3, 5]} }',
'{"a": [1, 1.1], "b": {"nest": {"d": 1} } }',
'{"a": [1, 1.1], "b": {"nest": {"d": [1, 3, 2.1]} } }',
]
@pytest.mark.parametrize('json_string', test_case)
def test_json_loads(json_string):
assert sjson.loads(json_string) == json.loads(json_string)
if __name__ == "__main__":
pytest.main([__file__]) | true | true |
1c4abbc45219aa0b02fb8ac79f287143752f95fa | 2,004 | py | Python | netket/hilbert/random/particle.py | VolodyaCO/netket | 629e885212d981d7748d155310abca4a1f9d5481 | [
"Apache-2.0"
] | null | null | null | netket/hilbert/random/particle.py | VolodyaCO/netket | 629e885212d981d7748d155310abca4a1f9d5481 | [
"Apache-2.0"
] | 26 | 2021-08-06T15:27:57.000Z | 2022-03-30T16:55:18.000Z | netket/hilbert/random/particle.py | VolodyaCO/netket | 629e885212d981d7748d155310abca4a1f9d5481 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
from jax import numpy as jnp
from netket.hilbert import Particle
from netket.utils.dispatch import dispatch
@dispatch
def random_state(hilb: Particle, key, batches: int, *, dtype):
"""Positions particles w.r.t. normal distribution,
if no periodic boundary conditions are applied
in a spatial dimension. Otherwise the particles are
positioned evenly along the box from 0 to L, with Gaussian noise
of certain width."""
pbc = jnp.array(hilb.n_particles * hilb.pbc)
boundary = jnp.tile(pbc, (batches, 1))
Ls = jnp.array(hilb.n_particles * hilb.extent)
modulus = jnp.where(jnp.equal(pbc, False), jnp.inf, Ls)
gaussian = jax.random.normal(key, shape=(batches, hilb.size))
width = jnp.min(modulus) / (4.0 * hilb.n_particles)
# The width gives the noise level. In the periodic case the
# particles are evenly distributed between 0 and min(L). The
# distance between the particles coordinates is therefore given by
# min(L) / hilb.N. To avoid particles to have coincident
# positions the noise level should be smaller than half this distance.
# We choose width = min(L) / (4*hilb.N)
noise = gaussian * width
uniform = jnp.tile(jnp.linspace(0.0, jnp.min(modulus), hilb.size), (batches, 1))
rs = jnp.where(jnp.equal(boundary, False), gaussian, (uniform + noise) % modulus)
return jnp.asarray(rs, dtype=dtype)
| 41.75 | 85 | 0.722056 | import jax
from jax import numpy as jnp
from netket.hilbert import Particle
from netket.utils.dispatch import dispatch
@dispatch
def random_state(hilb: Particle, key, batches: int, *, dtype):
pbc = jnp.array(hilb.n_particles * hilb.pbc)
boundary = jnp.tile(pbc, (batches, 1))
Ls = jnp.array(hilb.n_particles * hilb.extent)
modulus = jnp.where(jnp.equal(pbc, False), jnp.inf, Ls)
gaussian = jax.random.normal(key, shape=(batches, hilb.size))
width = jnp.min(modulus) / (4.0 * hilb.n_particles)
noise = gaussian * width
uniform = jnp.tile(jnp.linspace(0.0, jnp.min(modulus), hilb.size), (batches, 1))
rs = jnp.where(jnp.equal(boundary, False), gaussian, (uniform + noise) % modulus)
return jnp.asarray(rs, dtype=dtype)
| true | true |
1c4abfe636b358eb142c79f641327426a2e082d1 | 1,273 | py | Python | pychron/furnace/firmware/__init__.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/furnace/firmware/__init__.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/furnace/firmware/__init__.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
__version__ = "1.0"
PARAMETER_REGISTRY = {
"furnace_env_humidity": "001",
"furnace_env_temperature": "002",
"furnace_setpoint": "003",
"furnace_process_value": "004",
"feeder_position": "021",
"funnel_position": "031",
"switch_status": "041",
}
# ============= EOF =============================================
| 38.575758 | 81 | 0.533386 |
__version__ = "1.0"
PARAMETER_REGISTRY = {
"furnace_env_humidity": "001",
"furnace_env_temperature": "002",
"furnace_setpoint": "003",
"furnace_process_value": "004",
"feeder_position": "021",
"funnel_position": "031",
"switch_status": "041",
}
| true | true |
1c4ac00f27d955404c36f607e418e6eb54dace2d | 199 | py | Python | ui/mock_keyboard.py | amirhertz/SPAGHETTI | 660c4a565846090f73c3cadc3619255bca50d14f | [
"MIT"
] | 10 | 2022-02-03T06:19:13.000Z | 2022-03-29T12:32:19.000Z | ui/mock_keyboard.py | amirhertz/SPAGHETTI | 660c4a565846090f73c3cadc3619255bca50d14f | [
"MIT"
] | null | null | null | ui/mock_keyboard.py | amirhertz/SPAGHETTI | 660c4a565846090f73c3cadc3619255bca50d14f | [
"MIT"
] | null | null | null |
class Key:
ctrl_l = 'control_l'
class Controller:
@staticmethod
def press(key: str) -> str:
return key
@staticmethod
def release(key: str) -> str:
return key
| 13.266667 | 33 | 0.582915 |
class Key:
ctrl_l = 'control_l'
class Controller:
@staticmethod
def press(key: str) -> str:
return key
@staticmethod
def release(key: str) -> str:
return key
| true | true |
1c4ac0d7a55d4170cae7b7ec8b9808ffa64edcae | 910 | py | Python | dsvfile/Models/FactorySystem/InserterComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | 2 | 2021-03-01T19:57:20.000Z | 2021-08-02T20:54:48.000Z | dsvfile/Models/FactorySystem/InserterComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | null | null | null | dsvfile/Models/FactorySystem/InserterComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | null | null | null | from ...Fields import Int16Field, FloatField, BoolField
from ...Fields.Enums import EInserterStage, EItem
from . import Model, Int32Field
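# field-by-field layout of an inserter component record in a DSV save file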
class InserterComponent(Model):
version = Int32Field()
id = Int32Field()
entityId = Int32Field()
pcId = Int32Field()
stage = EInserterStage()
speed = Int32Field()
time = Int32Field()
stt = Int32Field()
delay = Int32Field()
pickTarget = Int32Field()
insertTarget = Int32Field()
careNeeds = BoolField()
canStack = BoolField()
pickOffset = Int16Field()
insertOffset = Int16Field()
filter = Int32Field()
itemId = EItem()
stackCount = Int32Field()
stackSize = Int32Field()
pos2_x = FloatField()
pos2_y = FloatField()
pos2_z = FloatField()
rot2_x = FloatField()
rot2_y = FloatField()
rot2_z = FloatField()
rot2_w = FloatField()
t1 = Int16Field()
t2 = Int16Field()
| 26 | 55 | 0.653846 | from ...Fields import Int16Field, FloatField, BoolField
from ...Fields.Enums import EInserterStage, EItem
from . import Model, Int32Field
class InserterComponent(Model):
version = Int32Field()
id = Int32Field()
entityId = Int32Field()
pcId = Int32Field()
stage = EInserterStage()
speed = Int32Field()
time = Int32Field()
stt = Int32Field()
delay = Int32Field()
pickTarget = Int32Field()
insertTarget = Int32Field()
careNeeds = BoolField()
canStack = BoolField()
pickOffset = Int16Field()
insertOffset = Int16Field()
filter = Int32Field()
itemId = EItem()
stackCount = Int32Field()
stackSize = Int32Field()
pos2_x = FloatField()
pos2_y = FloatField()
pos2_z = FloatField()
rot2_x = FloatField()
rot2_y = FloatField()
rot2_z = FloatField()
rot2_w = FloatField()
t1 = Int16Field()
t2 = Int16Field()
| true | true |
1c4ac1528db9a11fa760116b25f5da776d7843b1 | 7,422 | py | Python | ncappzoo/apps/object-detector/object-detector.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | null | null | null | ncappzoo/apps/object-detector/object-detector.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | null | null | null | ncappzoo/apps/object-detector/object-detector.py | yockgen/movidius | cc32f1951a4d00d2250bb0d2b9000c5f2435b41a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# ****************************************************************************
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# ****************************************************************************
# How to run Single Shot Multibox Detectors (SSD)
# on Intel® Movidius™ Neural Compute Stick (NCS)
import os
import sys
import numpy as np
import ntpath
import argparse
import skimage.io
import skimage.transform
import mvnc.mvncapi as mvnc
from utils import visualize_output
from utils import deserialize_output
# Detection threshold: Minimum confidence to tag as valid detection
CONFIDANCE_THRESHOLD = 0.60 # 60% confident
# Variable to store commandline arguments
ARGS = None
# ---- Step 1: Open the enumerated device and get a handle to it -------------
def open_ncs_device():
# Look for enumerated NCS device(s); quit program if none found.
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( "No devices found" )
quit()
# Get a handle to the first enumerated device and open it
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
# ---- Step 2: Load a graph file onto the NCS device -------------------------
def load_graph( device ):
# Read the graph file into a buffer
with open( ARGS.graph, mode='rb' ) as f:
blob = f.read()
# Load the graph buffer into the NCS
graph = device.AllocateGraph( blob )
return graph
# ---- Step 3: Pre-process the images ----------------------------------------
def pre_process_image( img_draw ):
# Resize image [Image size is defined during training]
img = skimage.transform.resize( img_draw, ARGS.dim, preserve_range=True )
# Convert RGB to BGR [skimage reads image in RGB, some networks may need BGR]
if( ARGS.colormode == "bgr" ):
img = img[:, :, ::-1]
# Mean subtraction & scaling [A common technique used to center the data]
img = img.astype( np.float16 )
img = ( img - np.float16( ARGS.mean ) ) * ARGS.scale
return img
# ---- Step 4: Read & print inference results from the NCS -------------------
def infer_image( graph, img ):
# Read original image, so we can perform visualization ops on it
img_draw = skimage.io.imread( ARGS.image )
# The first inference takes an additional ~20ms due to memory
# initializations, so we make a 'dummy forward pass'.
graph.LoadTensor( img, 'user object' )
output, userobj = graph.GetResult()
# Load the image as a half-precision floating point array
graph.LoadTensor( img, 'user object' )
# Get the results from NCS
output, userobj = graph.GetResult()
# Get execution time
inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )
# Deserialize the output into a python dictionary
if ARGS.network == 'SSD':
output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
elif ARGS.network == 'TinyYolo':
output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
# Print the results
print( "\n==============================================================" )
print( "I found these objects in", ntpath.basename( ARGS.image ) )
print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
print( "--------------------------------------------------------------" )
for i in range( 0, output_dict['num_detections'] ):
print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
+ labels[ int(output_dict['detection_classes_' + str(i)]) ]
+ ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
+ " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
# Draw bounding boxes around valid detections
(y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
(y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
# Prep string to overlay on the image
display_str = (
labels[output_dict.get('detection_classes_' + str(i))]
+ ": "
+ str( output_dict.get('detection_scores_' + str(i) ) )
+ "%" )
img_draw = visualize_output.draw_bounding_box(
y1, x1, y2, x2,
img_draw,
thickness=4,
color=(255, 255, 0),
display_str=display_str )
print( "==============================================================\n" )
# If a display is available, show the image on which inference was performed
if 'DISPLAY' in os.environ:
skimage.io.imshow( img_draw )
skimage.io.show()
# ---- Step 5: Unload the graph and close the device -------------------------
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
# ---- Main function (entry point for this script ) --------------------------
def main():
device = open_ncs_device()
graph = load_graph( device )
img_draw = skimage.io.imread( ARGS.image )
img = pre_process_image( img_draw )
infer_image( graph, img )
close_ncs_device( device, graph )
# ---- Define 'main' function as the entry point for this script -------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Object detection using SSD on \
Intel® Movidius™ Neural Compute Stick." )
parser.add_argument( '-n', '--network', type=str,
default='SSD',
help="network name: SSD or TinyYolo." )
parser.add_argument( '-g', '--graph', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/graph',
help="Absolute path to the neural network graph file." )
parser.add_argument( '-i', '--image', type=str,
default='../../data/images/nps_chair.png',
help="Absolute path to the image that needs to be inferred." )
parser.add_argument( '-l', '--labels', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/labels.txt',
help="Absolute path to labels file." )
parser.add_argument( '-M', '--mean', type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help="',' delimited floating point values for image mean." )
parser.add_argument( '-S', '--scale', type=float,
default=0.00789,
help="Absolute path to labels file." )
parser.add_argument( '-D', '--dim', type=int,
nargs='+',
default=[300, 300],
help="Image dimensions. ex. -D 224 224" )
parser.add_argument( '-c', '--colormode', type=str,
default="bgr",
help="RGB vs BGR color sequence. This is network dependent." )
ARGS = parser.parse_args()
# Load the labels file
labels =[ line.rstrip('\n') for line in
open( ARGS.labels ) if line != 'classes\n']
main()
# ==== End of file ===========================================================
| 35.511962 | 97 | 0.551603 |
import os
import sys
import numpy as np
import ntpath
import argparse
import skimage.io
import skimage.transform
import mvnc.mvncapi as mvnc
from utils import visualize_output
from utils import deserialize_output
CONFIDANCE_THRESHOLD = 0.60
ARGS = None
def open_ncs_device():
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( "No devices found" )
quit()
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
def load_graph( device ):
with open( ARGS.graph, mode='rb' ) as f:
blob = f.read()
graph = device.AllocateGraph( blob )
return graph
def pre_process_image( img_draw ):
img = skimage.transform.resize( img_draw, ARGS.dim, preserve_range=True )
if( ARGS.colormode == "bgr" ):
img = img[:, :, ::-1]
img = img.astype( np.float16 )
img = ( img - np.float16( ARGS.mean ) ) * ARGS.scale
return img
def infer_image( graph, img ):
img_draw = skimage.io.imread( ARGS.image )
graph.LoadTensor( img, 'user object' )
output, userobj = graph.GetResult()
graph.LoadTensor( img, 'user object' )
output, userobj = graph.GetResult()
inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )
if ARGS.network == 'SSD':
output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
elif ARGS.network == 'TinyYolo':
output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
print( "\n==============================================================" )
print( "I found these objects in", ntpath.basename( ARGS.image ) )
print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
print( "--------------------------------------------------------------" )
for i in range( 0, output_dict['num_detections'] ):
print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
+ labels[ int(output_dict['detection_classes_' + str(i)]) ]
+ ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
+ " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
(y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
(y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
display_str = (
labels[output_dict.get('detection_classes_' + str(i))]
+ ": "
+ str( output_dict.get('detection_scores_' + str(i) ) )
+ "%" )
img_draw = visualize_output.draw_bounding_box(
y1, x1, y2, x2,
img_draw,
thickness=4,
color=(255, 255, 0),
display_str=display_str )
print( "==============================================================\n" )
if 'DISPLAY' in os.environ:
skimage.io.imshow( img_draw )
skimage.io.show()
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
def main():
device = open_ncs_device()
graph = load_graph( device )
img_draw = skimage.io.imread( ARGS.image )
img = pre_process_image( img_draw )
infer_image( graph, img )
close_ncs_device( device, graph )
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Object detection using SSD on \
Intel® Movidius™ Neural Compute Stick." )
parser.add_argument( '-n', '--network', type=str,
default='SSD',
help="network name: SSD or TinyYolo." )
parser.add_argument( '-g', '--graph', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/graph',
help="Absolute path to the neural network graph file." )
parser.add_argument( '-i', '--image', type=str,
default='../../data/images/nps_chair.png',
help="Absolute path to the image that needs to be inferred." )
parser.add_argument( '-l', '--labels', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/labels.txt',
help="Absolute path to labels file." )
parser.add_argument( '-M', '--mean', type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help="',' delimited floating point values for image mean." )
parser.add_argument( '-S', '--scale', type=float,
default=0.00789,
help="Absolute path to labels file." )
parser.add_argument( '-D', '--dim', type=int,
nargs='+',
default=[300, 300],
help="Image dimensions. ex. -D 224 224" )
parser.add_argument( '-c', '--colormode', type=str,
default="bgr",
help="RGB vs BGR color sequence. This is network dependent." )
ARGS = parser.parse_args()
labels =[ line.rstrip('\n') for line in
open( ARGS.labels ) if line != 'classes\n']
main()
| true | true |
1c4ac1901b03408ddb92516d19c818932cbc8832 | 2,161 | py | Python | app/auth/forms.py | pointerboy/ModHub | 5b2bdf31bdf409c677e1009f879794f91e636a7b | [
"MIT"
] | null | null | null | app/auth/forms.py | pointerboy/ModHub | 5b2bdf31bdf409c677e1009f879794f91e636a7b | [
"MIT"
] | 1 | 2022-01-13T02:52:49.000Z | 2022-01-13T02:52:49.000Z | app/auth/forms.py | pointerboy/ModHub | 5b2bdf31bdf409c677e1009f879794f91e636a7b | [
"MIT"
] | null | null | null | from flask_babel import _, lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from wtforms_validators import AlphaNumeric
from app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'), render_kw={'class': "btn btn-lg btn-primary btn-block btn-login text-uppercase font-weight-bold mb-2"})
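# registration enforces a unique alphanumeric username (min 4 chars), a unique email address and a confirmed password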
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[Length(min=4), DataRequired(),
AlphaNumeric()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[Length(min=4), DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Make an account!'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Request Password Reset'))
| 43.22 | 143 | 0.672837 | from flask_babel import _, lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from wtforms_validators import AlphaNumeric
from app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'), render_kw={'class': "btn btn-lg btn-primary btn-block btn-login text-uppercase font-weight-bold mb-2"})
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[Length(min=4), DataRequired(),
AlphaNumeric()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[Length(min=4), DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Make an account!'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Request Password Reset'))
| true | true |
1c4ac1e423458e0a0a187f8698a9e64231b6a196 | 3,067 | py | Python | tube/tests/test_tflAPI.py | adamgilman/tube-python | 3d94e79f7d367eed95ed68b53d0ab13a36cc3219 | [
"BSD-3-Clause"
] | 5 | 2017-01-26T00:06:08.000Z | 2020-06-03T16:07:09.000Z | tube/tests/test_tflAPI.py | adamgilman/tube-python | 3d94e79f7d367eed95ed68b53d0ab13a36cc3219 | [
"BSD-3-Clause"
] | null | null | null | tube/tests/test_tflAPI.py | adamgilman/tube-python | 3d94e79f7d367eed95ed68b53d0ab13a36cc3219 | [
"BSD-3-Clause"
] | 1 | 2021-11-22T16:23:14.000Z | 2021-11-22T16:23:14.000Z | import unittest
from tube.tflAPI import TFLapi
import vcr
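# record live TfL API responses to local cassettes on the first run, then replay them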
my_vcr = vcr.VCR(
serializer = 'json',
cassette_library_dir = 'tube/tests/fixtures/cassettes',
record_mode = 'once',
match_on = ['uri', 'method'],
)
import logging
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.ERROR)
class TestTFLapiByURL(unittest.TestCase):
def setUp(self):
self.api = TFLapi()
def test_VerifyCorrectURLFetched(self):
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertEqual(detail.line, "B")
def test_VerifyPlatformsQuantities(self):
#camden town has 4 northern line platforms
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.station, "CTN")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 4)
#oxford circus has 2 bakerloo platforms
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 2)
def test_VerifyPlatformsIdentified(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.platforms[0].name, "Northbound - Platform 1")
self.assertEqual(detail.platforms[1].name, "Southbound - Platform 2")
self.assertEqual(detail.platforms[2].name, "Northbound - Platform 3")
self.assertEqual(detail.platforms[3].name, "Southbound - Platform 4")
def test_VerifyTrainsOnPlatforms(self):
#need testcase for no trains on platforms
with my_vcr.use_cassette('Detail-OXC-B(TrainCode).json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertIsInstance(detail.platforms[0].trains, list)
self.assertEqual(detail.platforms[0].trains[0].leadingcar_id, "1031576")
self.assertEqual(detail.platforms[0].trains[0].set_number, "236")
self.assertEqual(detail.platforms[0].trains[0].trip_number, "12")
self.assertEqual(detail.platforms[0].trains[0].arrival_seconds, "24")
self.assertEqual(detail.platforms[0].trains[0].arrival_time, "0:30")
self.assertEqual(detail.platforms[0].trains[0].current_location, "Between Regents Park and Oxford Circus")
self.assertEqual(detail.platforms[0].trains[0].destination, "Elephant and Castle")
self.assertEqual(detail.platforms[0].trains[0].destination_code, "154")
self.assertEqual(detail.platforms[0].trains[0].platform_departure_time, "15:28:23")
self.assertEqual(detail.platforms[0].trains[0].interval_between_previous_train, "24")
self.assertEqual(detail.platforms[0].trains[0].departed_current_station, "0")
self.assertEqual(detail.platforms[0].trains[0].direction, "0")
self.assertEqual(detail.platforms[0].trains[0].track_code, "TB391B") | 44.449275 | 109 | 0.750245 | import unittest
from tube.tflAPI import TFLapi
import vcr
my_vcr = vcr.VCR(
serializer = 'json',
cassette_library_dir = 'tube/tests/fixtures/cassettes',
record_mode = 'once',
match_on = ['uri', 'method'],
)
import logging
logging.basicConfig()
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.ERROR)
class TestTFLapiByURL(unittest.TestCase):
def setUp(self):
self.api = TFLapi()
def test_VerifyCorrectURLFetched(self):
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertEqual(detail.line, "B")
def test_VerifyPlatformsQuantities(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.station, "CTN")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 4)
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 2)
def test_VerifyPlatformsIdentified(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.platforms[0].name, "Northbound - Platform 1")
self.assertEqual(detail.platforms[1].name, "Southbound - Platform 2")
self.assertEqual(detail.platforms[2].name, "Northbound - Platform 3")
self.assertEqual(detail.platforms[3].name, "Southbound - Platform 4")
def test_VerifyTrainsOnPlatforms(self):
with my_vcr.use_cassette('Detail-OXC-B(TrainCode).json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertIsInstance(detail.platforms[0].trains, list)
self.assertEqual(detail.platforms[0].trains[0].leadingcar_id, "1031576")
self.assertEqual(detail.platforms[0].trains[0].set_number, "236")
self.assertEqual(detail.platforms[0].trains[0].trip_number, "12")
self.assertEqual(detail.platforms[0].trains[0].arrival_seconds, "24")
self.assertEqual(detail.platforms[0].trains[0].arrival_time, "0:30")
self.assertEqual(detail.platforms[0].trains[0].current_location, "Between Regents Park and Oxford Circus")
self.assertEqual(detail.platforms[0].trains[0].destination, "Elephant and Castle")
self.assertEqual(detail.platforms[0].trains[0].destination_code, "154")
self.assertEqual(detail.platforms[0].trains[0].platform_departure_time, "15:28:23")
self.assertEqual(detail.platforms[0].trains[0].interval_between_previous_train, "24")
self.assertEqual(detail.platforms[0].trains[0].departed_current_station, "0")
self.assertEqual(detail.platforms[0].trains[0].direction, "0")
self.assertEqual(detail.platforms[0].trains[0].track_code, "TB391B") | true | true |
1c4ac264208d85dfc358c39ac2b842a93a43d268 | 2,008 | py | Python | nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | null | null | null | nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | 1 | 2020-07-24T14:14:13.000Z | 2020-07-24T14:14:13.000Z | nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | 1 | 2020-07-24T10:40:59.000Z | 2020-07-24T10:40:59.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, BigInteger
from sqlalchemy import MetaData, Integer, String, Table
from nova import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# add column:
bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
uuid = Column('uuid', String(36))
# clear the cache to get rid of entries with no uuid
migrate_engine.execute(bw_usage_cache.delete())
bw_usage_cache.create_column(uuid)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# drop column:
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('mac', String(255)),
Column('uuid', String(36)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()),
useexisting=True)
bw_usage_cache.drop_column('uuid')
| 34.033898 | 78 | 0.697709 |
from sqlalchemy import Boolean, Column, DateTime, BigInteger
from sqlalchemy import MetaData, Integer, String, Table
from nova import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
uuid = Column('uuid', String(36))
migrate_engine.execute(bw_usage_cache.delete())
bw_usage_cache.create_column(uuid)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('mac', String(255)),
Column('uuid', String(36)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()),
useexisting=True)
bw_usage_cache.drop_column('uuid')
| true | true |
1c4ac32fd1e2599ccd47b8d66ea9daef42b250e7 | 2,232 | py | Python | tests/test_weighting.py | DimitrisAlivas/StarQE | c17676e5f1e3f19c0c4c117a50abe2ce22ffef28 | [
"MIT"
] | 11 | 2021-06-17T15:01:36.000Z | 2022-02-04T16:48:27.000Z | tests/test_weighting.py | DimitrisAlivas/StarQE | c17676e5f1e3f19c0c4c117a50abe2ce22ffef28 | [
"MIT"
] | null | null | null | tests/test_weighting.py | DimitrisAlivas/StarQE | c17676e5f1e3f19c0c4c117a50abe2ce22ffef28 | [
"MIT"
] | 1 | 2022-03-28T03:55:33.000Z | 2022-03-28T03:55:33.000Z | """Tests for weighting."""
from typing import Any, MutableMapping
import torch
import unittest_templates
from mphrqe.layer.weighting import AttentionMessageWeighting, MessageWeighting, SymmetricMessageWeighting
class MessageWeightingTests(unittest_templates.GenericTestCase[MessageWeighting]):
"""Tests for message weighting."""
num_entities: int = 33
num_edges: int = 101
dim: int = 3
def test_forward(self):
# prepare data
x_e = torch.rand(self.num_entities, self.dim)
edge_index = torch.randint(self.num_entities, size=(2, self.num_edges))
message = torch.rand(self.num_edges, self.dim, requires_grad=True)
# forward pass
out = self.instance(
edge_index=edge_index,
message=message,
x_e=x_e,
)
# check type
assert isinstance(out, tuple)
assert len(out) == 2
message_, weight_ = out
assert torch.is_tensor(message_)
assert torch.is_tensor(weight_)
# check shape
assert message_.shape[0] == self.num_edges
assert weight_.shape[0] == self.num_edges
weighted_message = message_ * weight_.unsqueeze(dim=-1)
# try backward pass
weighted_message.mean().backward()
class SymmetricMessageWeightingTests(MessageWeightingTests):
"""Tests for static symmetric message weighting."""
cls = SymmetricMessageWeighting
class AttentionMessageWeightingTests(MessageWeightingTests):
"""Tests for message weighting by attention."""
cls = AttentionMessageWeighting
# make divisible by number of heads
dim = 8
num_heads = 2
def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]: # noqa: D102
kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
# make sure that the output dimension is divisible by the number of heads.
kwargs["num_heads"] = self.num_heads
kwargs["output_dim"] = self.dim
return kwargs
class MessageWeightingMetaTest(unittest_templates.MetaTestCase[MessageWeighting]):
"""Test for tests for message weightings."""
base_cls = MessageWeighting
base_test = MessageWeightingTests
| 30.162162 | 114 | 0.68862 | from typing import Any, MutableMapping
import torch
import unittest_templates
from mphrqe.layer.weighting import AttentionMessageWeighting, MessageWeighting, SymmetricMessageWeighting
class MessageWeightingTests(unittest_templates.GenericTestCase[MessageWeighting]):
num_entities: int = 33
num_edges: int = 101
dim: int = 3
def test_forward(self):
x_e = torch.rand(self.num_entities, self.dim)
edge_index = torch.randint(self.num_entities, size=(2, self.num_edges))
message = torch.rand(self.num_edges, self.dim, requires_grad=True)
out = self.instance(
edge_index=edge_index,
message=message,
x_e=x_e,
)
assert isinstance(out, tuple)
assert len(out) == 2
message_, weight_ = out
assert torch.is_tensor(message_)
assert torch.is_tensor(weight_)
assert message_.shape[0] == self.num_edges
assert weight_.shape[0] == self.num_edges
weighted_message = message_ * weight_.unsqueeze(dim=-1)
weighted_message.mean().backward()
class SymmetricMessageWeightingTests(MessageWeightingTests):
cls = SymmetricMessageWeighting
class AttentionMessageWeightingTests(MessageWeightingTests):
cls = AttentionMessageWeighting
dim = 8
num_heads = 2
    def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
        kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
kwargs["num_heads"] = self.num_heads
kwargs["output_dim"] = self.dim
return kwargs
class MessageWeightingMetaTest(unittest_templates.MetaTestCase[MessageWeighting]):
base_cls = MessageWeighting
base_test = MessageWeightingTests
| true | true |
1c4ac39c33fc74a087b76922f9853b80be409055 | 1,078 | py | Python | seedorf/sports/migrations/0003_add_sport_category_table_tennis.py | SportySpots/seedorf | 3f09c720ea8df0d1171022b68b494c2758f75d44 | [
"MIT"
] | 3 | 2018-04-22T10:11:01.000Z | 2018-11-16T22:00:34.000Z | seedorf/sports/migrations/0003_add_sport_category_table_tennis.py | SportySpots/seedorf | 3f09c720ea8df0d1171022b68b494c2758f75d44 | [
"MIT"
] | 87 | 2018-03-14T13:42:55.000Z | 2022-03-21T21:15:16.000Z | seedorf/sports/migrations/0003_add_sport_category_table_tennis.py | SportySpots/seedorf | 3f09c720ea8df0d1171022b68b494c2758f75d44 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2019-03-29 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("sports", "0002_auto_20180602_2110")]
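    # extend the sport category choices with "table_tennis"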
operations = [
migrations.AlterField(
model_name="sport",
name="category",
field=models.CharField(
choices=[
("basketball", "Basketball"),
("beach_volleyball", "Beach Volleyball"),
("bootcamp", "Bootcamp"),
("boules", "Boules"),
("fitness", "Fitness"),
("others", "Others"),
("skating", "Skating"),
("soccer", "Soccer"),
("tennis", "Tennis"),
("table_tennis", "Table Tennis"),
],
default="others",
help_text="Name of the main category of the sport (e.g. Soccer).",
max_length=50,
verbose_name="Sport Category",
),
)
]
| 31.705882 | 82 | 0.454545 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("sports", "0002_auto_20180602_2110")]
operations = [
migrations.AlterField(
model_name="sport",
name="category",
field=models.CharField(
choices=[
("basketball", "Basketball"),
("beach_volleyball", "Beach Volleyball"),
("bootcamp", "Bootcamp"),
("boules", "Boules"),
("fitness", "Fitness"),
("others", "Others"),
("skating", "Skating"),
("soccer", "Soccer"),
("tennis", "Tennis"),
("table_tennis", "Table Tennis"),
],
default="others",
help_text="Name of the main category of the sport (e.g. Soccer).",
max_length=50,
verbose_name="Sport Category",
),
)
]
| true | true |
1c4ac3f3a8c481f3e1541b4748ebe97e017ea4e8 | 1,239 | py | Python | BaseAdapter/EUAOSSHClient.py | leonevo/euao | ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1 | [
"Apache-2.0"
] | 2 | 2015-01-16T07:36:19.000Z | 2017-03-10T06:11:55.000Z | BaseAdapter/EUAOSSHClient.py | leonevo/euao | ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1 | [
"Apache-2.0"
] | null | null | null | BaseAdapter/EUAOSSHClient.py | leonevo/euao | ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1 | [
"Apache-2.0"
] | null | null | null | import paramiko
class EUAOSSHClient(paramiko.SSHClient):
## overload the exec_command method
def exec_command(self, command, bufsize=-1, timeout=None):
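        # open a dedicated channel for this command and expose its streams as file objects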
chan = self._transport.open_session()
chan.settimeout(timeout)
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr
if __name__ == '__main__':
cmd=r'mksyscfg -r lpar -m Server-9117-MMA-SN06D6D82 -i "name=testEUAOclient,profile_name=default,lpar_env=aixlinux,min_mem=1024,desired_mem=2048,max_mem=32768,proc_mode=shared,min_procs=1,desired_procs=2,max_procs=16,min_proc_units=0.1,desired_proc_units=0.5,max_proc_units=16,sharing_mode=uncap,uncap_weight=128,auto_start=1,boot_mode=norm,max_virtual_slots=1000,\"virtual_eth_adapters=22/0/1///1,23/0/2///1\",\"virtual_scsi_adapters=20/client//VIOserver1/23/1,21/client//VIOserver2/23/1\""'
#ExecuteSimpleCMDviaSSH2('182.247.251.247','hscroot','abc1234',cmd)
#ExecuteCMDviaSSH2('182.247.251.247','hscroot','abc1234',cmd,connect_timeout=5,command_timeout=20,cmd_prompt='hscroot@localhost:~>')
sc=EUAOSSHClient() | 72.882353 | 497 | 0.727199 | import paramiko
class EUAOSSHClient(paramiko.SSHClient):
def exec_command(self, command, bufsize=-1, timeout=None):
chan = self._transport.open_session()
chan.settimeout(timeout)
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr
if __name__ == '__main__':
cmd=r'mksyscfg -r lpar -m Server-9117-MMA-SN06D6D82 -i "name=testEUAOclient,profile_name=default,lpar_env=aixlinux,min_mem=1024,desired_mem=2048,max_mem=32768,proc_mode=shared,min_procs=1,desired_procs=2,max_procs=16,min_proc_units=0.1,desired_proc_units=0.5,max_proc_units=16,sharing_mode=uncap,uncap_weight=128,auto_start=1,boot_mode=norm,max_virtual_slots=1000,\"virtual_eth_adapters=22/0/1///1,23/0/2///1\",\"virtual_scsi_adapters=20/client//VIOserver1/23/1,21/client//VIOserver2/23/1\""'
sc=EUAOSSHClient() | true | true |
1c4ac43fe52487a7f0bd887554dc6cd88c8beb6f | 3,233 | py | Python | maml_examples/cluster_maml_trpo_ant.py | JakobStruye/maml_rl | ec92a5138127a86a4c15925c70e61dbdf038cd18 | [
"MIT"
] | null | null | null | maml_examples/cluster_maml_trpo_ant.py | JakobStruye/maml_rl | ec92a5138127a86a4c15925c70e61dbdf038cd18 | [
"MIT"
] | null | null | null | maml_examples/cluster_maml_trpo_ant.py | JakobStruye/maml_rl | ec92a5138127a86a4c15925c70e61dbdf038cd18 | [
"MIT"
] | null | null | null | from sandbox.rocky.tf.algos.maml_trpo import MAMLTRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.envs.mujoco.ant_env_rand import AntEnvRand
from rllab.envs.mujoco.ant_env_rand_goal import AntEnvRandGoal
from rllab.envs.mujoco.ant_env_rand_direc import AntEnvRandDirec
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
import tensorflow as tf
stub(globals())
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
@variant
def fast_lr(self):
return [0.1]
@variant
def meta_step_size(self):
return [0.01] # sometimes 0.02 better
@variant
def fast_batch_size(self):
return [20]
@variant
def meta_batch_size(self):
return [40] # at least a total batch size of 400. (meta batch size*fast batch size)
@variant
def seed(self):
return [1]
@variant
def task_var(self): # fwd/bwd task or goal vel task
# 0 for fwd/bwd, 1 for goal vel (kind of), 2 for goal pose
return [2]
# should also code up alternative KL thing
variants = VG().variants()
max_path_length = 200
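# single inner-loop (fast) gradient update per task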
num_grad_updates = 1
use_maml=True
for v in variants:
task_var = v['task_var']
if task_var == 0:
env = TfEnv(normalize(AntEnvRandDirec()))
task_var = 'direc'
elif task_var == 1:
env = TfEnv(normalize(AntEnvRand()))
task_var = 'vel'
elif task_var == 2:
env = TfEnv(normalize(AntEnvRandGoal()))
task_var = 'pos'
policy = MAMLGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
grad_step_size=v['fast_lr'],
hidden_nonlinearity=tf.nn.relu,
hidden_sizes=(100,100),
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = MAMLTRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=v['fast_batch_size'], # number of trajs for grad update
max_path_length=max_path_length,
meta_batch_size=v['meta_batch_size'],
num_grad_updates=num_grad_updates,
n_itr=4,
use_maml=use_maml,
step_size=v['meta_step_size'],
plot=False,
)
run_experiment_lite(
algo.train(),
exp_prefix='posticml_trpo_maml_ant' + task_var + '_' + str(max_path_length),
exp_name='maml'+str(int(use_maml))+'_fbs'+str(v['fast_batch_size'])+'_mbs'+str(v['meta_batch_size'])+'_flr_' + str(v['fast_lr']) + '_mlr' + str(v['meta_step_size']),
# Number of parallel workers for sampling
n_parallel=4,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="all",
snapshot_gap=1,
sync_s3_pkl=True,
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=v["seed"],
mode="local",
#mode="ec2",
variant=v,
# plot=True,
# terminate_machine=False,
)
| 29.66055 | 174 | 0.666564 | from sandbox.rocky.tf.algos.maml_trpo import MAMLTRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.envs.mujoco.ant_env_rand import AntEnvRand
from rllab.envs.mujoco.ant_env_rand_goal import AntEnvRandGoal
from rllab.envs.mujoco.ant_env_rand_direc import AntEnvRandDirec
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
import tensorflow as tf
stub(globals())
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
@variant
def fast_lr(self):
return [0.1]
@variant
def meta_step_size(self):
return [0.01]
@variant
def fast_batch_size(self):
return [20]
@variant
def meta_batch_size(self):
return [40]
@variant
def seed(self):
return [1]
@variant
def task_var(self): return [2]
variants = VG().variants()
max_path_length = 200
num_grad_updates = 1
use_maml=True
for v in variants:
task_var = v['task_var']
if task_var == 0:
env = TfEnv(normalize(AntEnvRandDirec()))
task_var = 'direc'
elif task_var == 1:
env = TfEnv(normalize(AntEnvRand()))
task_var = 'vel'
elif task_var == 2:
env = TfEnv(normalize(AntEnvRandGoal()))
task_var = 'pos'
policy = MAMLGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
grad_step_size=v['fast_lr'],
hidden_nonlinearity=tf.nn.relu,
hidden_sizes=(100,100),
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = MAMLTRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=v['fast_batch_size'], max_path_length=max_path_length,
meta_batch_size=v['meta_batch_size'],
num_grad_updates=num_grad_updates,
n_itr=4,
use_maml=use_maml,
step_size=v['meta_step_size'],
plot=False,
)
run_experiment_lite(
algo.train(),
exp_prefix='posticml_trpo_maml_ant' + task_var + '_' + str(max_path_length),
exp_name='maml'+str(int(use_maml))+'_fbs'+str(v['fast_batch_size'])+'_mbs'+str(v['meta_batch_size'])+'_flr_' + str(v['fast_lr']) + '_mlr' + str(v['meta_step_size']),
n_parallel=4,
snapshot_mode="all",
snapshot_gap=1,
sync_s3_pkl=True,
seed=v["seed"],
mode="local",
variant=v,
)
| true | true |
1c4ac4962fab48761d3549b1ff774f9ba8c83d10 | 3,182 | py | Python | v2/cleanup/clean_relatedness_data.py | amoudgl/pun-model | bf18335a58ecfb8624d5b40b619a2bf7bbb72714 | [
"BSD-3-Clause"
] | 13 | 2019-04-18T10:09:16.000Z | 2022-03-01T21:42:51.000Z | v2/cleanup/clean_relatedness_data.py | amoudgl/pun-model | bf18335a58ecfb8624d5b40b619a2bf7bbb72714 | [
"BSD-3-Clause"
] | 1 | 2022-03-01T22:24:14.000Z | 2022-03-01T22:24:14.000Z | v2/cleanup/clean_relatedness_data.py | amoudgl/pun-model | bf18335a58ecfb8624d5b40b619a2bf7bbb72714 | [
"BSD-3-Clause"
] | 3 | 2019-04-16T03:21:50.000Z | 2021-06-04T05:34:08.000Z | #!/usr/bin/env python3
from utils import data2numpy, map_puntypeID_to_idx
relatedness_near_datapath = ("./wordPair_relatedness_" +
"smoothedTrigrams_near.csv")
relatedness_identical_datapath = ("./wordPair_relatedness_" +
"smoothedTrigrams_identical.csv")
# get m1 and m2 (two meanings/interpretations) for each sentence in the dataset
meanings = {}
puns_datapath = '../../data/data-agg.csv'
puns_data = data2numpy(puns_datapath)
for i, row in enumerate(puns_data):
meanings[i] = [row[-3], row[-2]]
# map puntype (near/identical) and puntypeID to index in puns dataset
# for example, m['near'][1] will yield index of the pun with 'near' homophone
# and having 'near' sentence ID = 1 in puns dataset [data-agg.csv]
get_idx = map_puntypeID_to_idx(data2numpy(puns_datapath))
# clean up relatedness data
relatedness = {}
data = data2numpy(relatedness_near_datapath)
for row in data:
id = int(row[0])
idx = get_idx['near'][id]
m1 = meanings[idx][0]
m2 = meanings[idx][1]
word = row[3]
m1_relatedness = float(row[4])
m2_relatedness = float(row[5])
# build word pairs
t1 = [word, m1]
t1.sort()
t1 = tuple(t1)
t2 = [word, m2]
t2.sort()
t2 = tuple(t2)
# save relatedness data
if t1 not in relatedness:
relatedness[t1] = []
if t2 not in relatedness:
relatedness[t2] = []
# log duplicate data, if any for a given tuple
if m1_relatedness not in relatedness[t1]:
relatedness[t1].append(m1_relatedness)
if m2_relatedness not in relatedness[t2]:
relatedness[t2].append(m2_relatedness)
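# Note: sorting each [word, meaning] pair before turning it into a tuple makes
# the key order-insensitive, e.g. (hypothetical words) both ['bass', 'fish'] and
# ['fish', 'bass'] collapse to the same key ('bass', 'fish').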
data = data2numpy(relatedness_identical_datapath)
for row in data:
id = int(row[0])
idx = get_idx['identical'][id]
m1 = meanings[idx][0]
m2 = meanings[idx][1]
word = row[3]
m1_relatedness = float(row[4])
m2_relatedness = float(row[5])
# build word pairs
t1 = [word, m1]
t1.sort()
t1 = tuple(t1)
t2 = [word, m2]
t2.sort()
t2 = tuple(t2)
# save relatedness data
if t1 not in relatedness:
relatedness[t1] = []
if t2 not in relatedness:
relatedness[t2] = []
# log duplicate data, if any for a given tuple
if m1_relatedness not in relatedness[t1]:
relatedness[t1].append(m1_relatedness)
if m2_relatedness not in relatedness[t2]:
relatedness[t2].append(m2_relatedness)
# for tuple key "t", relatedness[t] should return a list containing single
# element; let's see how many duplicates we have
keys = relatedness.keys()
for key in keys:
if (len(relatedness[key]) > 1):
print("multiple relatedness values " +
"found for tuple ({}, {}):".format(key[0], key[1]), end=" ")
print(relatedness[key])
# pick a single value for tuples with multiple relatedness values
# and save clean data
output_path = "../data/relatedness_clean.csv"
f = open(output_path, "w")
f.write("word1,word2,relatedness\n")
for key in keys:
row = [key[0], key[1], str(relatedness[key][0])]
row_str = ",".join(row) + "\n"
f.write(row_str)
print("saved clean relatedness data at {}".format(output_path))
| 31.196078 | 79 | 0.650849 | from utils import data2numpy, map_puntypeID_to_idx
relatedness_near_datapath = ("./wordPair_relatedness_" +
"smoothedTrigrams_near.csv")
relatedness_identical_datapath = ("./wordPair_relatedness_" +
"smoothedTrigrams_identical.csv")
meanings = {}
puns_datapath = '../../data/data-agg.csv'
puns_data = data2numpy(puns_datapath)
for i, row in enumerate(puns_data):
meanings[i] = [row[-3], row[-2]]
get_idx = map_puntypeID_to_idx(data2numpy(puns_datapath))
relatedness = {}
data = data2numpy(relatedness_near_datapath)
for row in data:
id = int(row[0])
idx = get_idx['near'][id]
m1 = meanings[idx][0]
m2 = meanings[idx][1]
word = row[3]
m1_relatedness = float(row[4])
m2_relatedness = float(row[5])
t1 = [word, m1]
t1.sort()
t1 = tuple(t1)
t2 = [word, m2]
t2.sort()
t2 = tuple(t2)
if t1 not in relatedness:
relatedness[t1] = []
if t2 not in relatedness:
relatedness[t2] = []
if m1_relatedness not in relatedness[t1]:
relatedness[t1].append(m1_relatedness)
if m2_relatedness not in relatedness[t2]:
relatedness[t2].append(m2_relatedness)
data = data2numpy(relatedness_identical_datapath)
for row in data:
id = int(row[0])
idx = get_idx['identical'][id]
m1 = meanings[idx][0]
m2 = meanings[idx][1]
word = row[3]
m1_relatedness = float(row[4])
m2_relatedness = float(row[5])
t1 = [word, m1]
t1.sort()
t1 = tuple(t1)
t2 = [word, m2]
t2.sort()
t2 = tuple(t2)
if t1 not in relatedness:
relatedness[t1] = []
if t2 not in relatedness:
relatedness[t2] = []
if m1_relatedness not in relatedness[t1]:
relatedness[t1].append(m1_relatedness)
if m2_relatedness not in relatedness[t2]:
relatedness[t2].append(m2_relatedness)
keys = relatedness.keys()
for key in keys:
if (len(relatedness[key]) > 1):
print("multiple relatedness values " +
"found for tuple ({}, {}):".format(key[0], key[1]), end=" ")
print(relatedness[key])
# pick a single value for tuples with multiple relatedness values
# and save clean data
output_path = "../data/relatedness_clean.csv"
f = open(output_path, "w")
f.write("word1,word2,relatedness\n")
for key in keys:
row = [key[0], key[1], str(relatedness[key][0])]
row_str = ",".join(row) + "\n"
f.write(row_str)
print("saved clean relatedness data at {}".format(output_path))
| true | true |
1c4ac5f669e93e7f785530f47d72998b4960e2bd | 745 | py | Python | mvc/router.py | yashpokar/mvc | f524973739bfd63a85dfa06bdfc7fd62472c19dc | [
"MIT"
] | null | null | null | mvc/router.py | yashpokar/mvc | f524973739bfd63a85dfa06bdfc7fd62472c19dc | [
"MIT"
] | null | null | null | mvc/router.py | yashpokar/mvc | f524973739bfd63a85dfa06bdfc7fd62472c19dc | [
"MIT"
] | null | null | null | from werkzeug.routing import Map, Rule
class Router:
_registry = {}
_controllers = {}
@staticmethod
def get(pattern, callback):
Router._register('GET', pattern, callback)
@staticmethod
def post(pattern, callback):
Router._register('POST', pattern, callback)
@staticmethod
def _register(method, pattern, callback):
if pattern in Router._registry:
Router._registry[pattern]['methods'].append(method)
else:
Router._registry[pattern] = {
'endpoint': callback,
'methods': [method],
}
@staticmethod
def getRules():
return Map([Rule(pattern, **rule) for pattern, rule in Router._registry.items()])
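# Minimal usage sketch; the route and handler below are hypothetical and only
# illustrate how routes registered through Router end up in a werkzeug Map.
if __name__ == '__main__':
    def list_users():
        return 'users'

    # Registering the same pattern twice extends its allowed methods.
    Router.get('/users', list_users)
    Router.post('/users', list_users)

    url_map = Router.getRules()
    print(url_map)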
| 25.689655 | 89 | 0.601342 | from werkzeug.routing import Map, Rule
class Router:
_registry = {}
_controllers = {}
@staticmethod
def get(pattern, callback):
Router._register('GET', pattern, callback)
@staticmethod
def post(pattern, callback):
Router._register('POST', pattern, callback)
@staticmethod
def _register(method, pattern, callback):
if pattern in Router._registry:
Router._registry[pattern]['methods'].append(method)
else:
Router._registry[pattern] = {
'endpoint': callback,
'methods': [method],
}
@staticmethod
def getRules():
return Map([Rule(pattern, **rule) for pattern, rule in Router._registry.items()])
| true | true |
1c4ac5f9721e8cfa983599bd81afe585bdecdf5f | 2,530 | py | Python | config/settings/local.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | config/settings/local.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | config/settings/local.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
import environ
from .mysqlconf import *
from .common import * # noqa
# mezzanine app settings
from .mezzaconf import *
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='dwqfjwf&q-8+#ko-#8k)jpf#h8bp(@515@x@l#=w3ktswereue')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
'JQUERY_URL': 'http://libs.baidu.com/jquery/2.1.4/jquery.min.js',
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from zhiliao.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals()) | 29.418605 | 99 | 0.549407 | import environ
from .mysqlconf import *
from .common import *
from .mezzaconf import *
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env("DJANGO_SECRET_KEY", default='dwqfjwf&q-8+#ko-#8k)jpf#h8bp(@515@x@l#=w3ktswereue')
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
'JQUERY_URL': 'http://libs.baidu.com/jquery/2.1.4/jquery.min.js',
}
INSTALLED_APPS += ('django_extensions', )
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# required.
try:
from zhiliao.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals()) | true | true |
1c4ac6ac9b7ea1e11c442e577ca5452024f8d69a | 4,309 | py | Python | main.py | jakestrouse00/Python-Art | f5ddea614789ef8e13881f888f80b94d6aae5f9e | [
"MIT"
] | 1 | 2020-03-05T08:54:29.000Z | 2020-03-05T08:54:29.000Z | main.py | jakestrouse00/Python-Art | f5ddea614789ef8e13881f888f80b94d6aae5f9e | [
"MIT"
] | null | null | null | main.py | jakestrouse00/Python-Art | f5ddea614789ef8e13881f888f80b94d6aae5f9e | [
"MIT"
] | null | null | null | from turtle import *
import random
import threading
from tkinter import *
# generate random seed
num = random.randint(1897348294, 18495729473285739)
print("\n\nUsing Seed: " + str(num))
# set the seed for all randomization
random.seed(num)
# save the current seed to a text file
with open('current_seed.txt', 'w') as f:
f.write(str(num))
# colors
colors = ['blue', 'red', 'purple', 'yellow', 'green', 'orange', 'hot_colors']
# create the turtle
turtle1 = Turtle()
# make it so there is no arrow drawing the lines
turtle1.ht()
# get window size
screen = Screen()
# set the background color
screen.bgcolor('white')
# set the screen size
screen.screensize(canvwidth=512, canvheight=512)
# get the screen height and width
w = screen.window_width()
h = screen.window_height()
# printing just for reference
print(screen.screensize())
# enable the following line to have a more precise image
# w, h = w // 2, h // 2
# set the turtle speed
turtle1.speed(0) # max speed is 0
def chooseColor():
"""
chooses random color then opens that color's respective shade text file.
then it randomly chooses a shade for the previous chosen color.
"""
color = random.choice(colors)
with open("colors/" + color + '.txt', 'r') as f:
shades = f.read().splitlines()
rgb = random.choice(shades)
print("Using " + color + " with rgb " + rgb)
return rgb
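# Illustrative sketch (hypothetical file contents): each colors/<name>.txt file
# is expected to hold one shade per line in any form turtle.pencolor() accepts,
# for example
#   #1f3b73
#   #2a52be
# in which case chooseColor() could return the string '#2a52be'.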
def draw_background(a_turtle):
""" Draw a background rectangle. """
ts = a_turtle.getscreen()
canvas = ts.getcanvas()
height = ts.getcanvas()._canvas.winfo_height()
width = ts.getcanvas()._canvas.winfo_width()
turtleheading = turtle1.heading()
turtlespeed = turtle1.speed()
penposn = turtle1.position()
penstate = turtle1.pen()
turtle1.penup()
turtle1.speed(0) # fastest
turtle1.goto(-width / 2 - 2, -height / 2 + 3)
turtle1.fillcolor(Screen().bgcolor())
turtle1.begin_fill()
turtle1.setheading(0)
turtle1.forward(width)
turtle1.setheading(90)
turtle1.forward(height)
turtle1.setheading(180)
turtle1.forward(width)
turtle1.setheading(270)
turtle1.forward(height)
turtle1.end_fill()
turtle1.penup()
turtle1.setposition(*penposn)
turtle1.pen(penstate)
turtle1.setheading(turtleheading)
turtle1.speed(turtlespeed)
draw_background(turtle1)
def square():
"""
Draws square with angles of 70 to 91 degrees, with
side lengths of 100 to 201
Guess you can't call it a square anymore
"""
m = random.randint(70, 91)
d = random.randint(100, 201)
for i in range(4):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.right(m)
turtle1.forward(d)
def hexagon():
"""
Draws hexagon with angles of 70 to 91 degrees, with
side lengths of 100 to 201
Guess you can't call it a hexagon anymore
"""
m = random.randint(70, 91)
d = random.randint(100, 201)
turtle1.right(90)
for i in range(4):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.forward(m)
turtle1.right(d)
turtle1.forward(d)
def triangle():
"""
Draws triangle with angles of 70 to 91 degrees, with
side lengths of 100 to 201
Guess you can't call it a triangle anymore
"""
m = random.randint(70, 91)
d = random.randint(100, 201)
for i in range(3):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.forward(m)
turtle1.right(-d)
# set variables for counting
j = 0
m = 50
while True:
x, y = turtle1.pos() # Get x, y positions.
if abs(x) > w or abs(y) > h: # Check if pen is outside of frame
# reset pen to random position on X and Y between 0 and the frame border
theX = random.randint(0, w - 100)
theY = random.randint(0, h - 100)
turtle1.setx(theX)
turtle1.sety(theY)
# draw a triangle, a hexagon and a square
triangle()
hexagon()
square()
j += 1
# if program has run the above 50 times, its time for another save
if j == m:
print("\n\nSAVING!!!!!!!!!\n\n")
# get the current screen
ts = turtle1.getscreen()
# save the drawing to a post script
ts.getcanvas().postscript(file="art_save.eps")
m += 50
| 26.115152 | 80 | 0.635182 | from turtle import *
import random
import threading
from tkinter import *
num = random.randint(1897348294, 18495729473285739)
print("\n\nUsing Seed: " + str(num))
random.seed(num)
with open('current_seed.txt', 'w') as f:
f.write(str(num))
colors = ['blue', 'red', 'purple', 'yellow', 'green', 'orange', 'hot_colors']
turtle1 = Turtle()
turtle1.ht()
screen = Screen()
screen.bgcolor('white')
screen.screensize(canvwidth=512, canvheight=512)
w = screen.window_width()
h = screen.window_height()
print(screen.screensize())
turtle1.speed(0)
def chooseColor():
color = random.choice(colors)
with open("colors/" + color + '.txt', 'r') as f:
shades = f.read().splitlines()
rgb = random.choice(shades)
print("Using " + color + " with rgb " + rgb)
return rgb
def draw_background(a_turtle):
ts = a_turtle.getscreen()
canvas = ts.getcanvas()
height = ts.getcanvas()._canvas.winfo_height()
width = ts.getcanvas()._canvas.winfo_width()
turtleheading = turtle1.heading()
turtlespeed = turtle1.speed()
penposn = turtle1.position()
penstate = turtle1.pen()
turtle1.penup()
    turtle1.speed(0)
    turtle1.goto(-width / 2 - 2, -height / 2 + 3)
turtle1.fillcolor(Screen().bgcolor())
turtle1.begin_fill()
turtle1.setheading(0)
turtle1.forward(width)
turtle1.setheading(90)
turtle1.forward(height)
turtle1.setheading(180)
turtle1.forward(width)
turtle1.setheading(270)
turtle1.forward(height)
turtle1.end_fill()
turtle1.penup()
turtle1.setposition(*penposn)
turtle1.pen(penstate)
turtle1.setheading(turtleheading)
turtle1.speed(turtlespeed)
draw_background(turtle1)
def square():
m = random.randint(70, 91)
d = random.randint(100, 201)
for i in range(4):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.right(m)
turtle1.forward(d)
def hexagon():
m = random.randint(70, 91)
d = random.randint(100, 201)
turtle1.right(90)
for i in range(4):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.forward(m)
turtle1.right(d)
turtle1.forward(d)
def triangle():
m = random.randint(70, 91)
d = random.randint(100, 201)
for i in range(3):
rgb = chooseColor()
turtle1.pencolor(rgb)
turtle1.forward(m)
turtle1.right(-d)
j = 0
m = 50
while True:
    x, y = turtle1.pos()
    if abs(x) > w or abs(y) > h:
        theX = random.randint(0, w - 100)
theY = random.randint(0, h - 100)
turtle1.setx(theX)
turtle1.sety(theY)
triangle()
hexagon()
square()
j += 1
if j == m:
print("\n\nSAVING!!!!!!!!!\n\n")
ts = turtle1.getscreen()
ts.getcanvas().postscript(file="art_save.eps")
m += 50
| true | true |
1c4ac7c71bb9d7a5423ddf8200fe2fede8b354f7 | 87 | py | Python | python/testData/docstrings/googleDescriptionOfReturnValueOnNextLine.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/docstrings/googleDescriptionOfReturnValueOnNextLine.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/docstrings/googleDescriptionOfReturnValueOnNextLine.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def func():
"""
Returns:
int:
return value description
""" | 14.5 | 33 | 0.448276 | def func(): | true | true |
1c4ac865607d0326e4cf6dc830274b68eb3d48d7 | 28 | py | Python | pyrcc/__init__.py | vishalbelsare/pyrcc | 3dfb9721a41f5c253690fd42a4540f639534ce9b | [
"MIT"
] | 96 | 2017-09-16T16:00:51.000Z | 2021-12-07T07:53:57.000Z | pyrcc/__init__.py | jtpils/pyrcc | 3dfb9721a41f5c253690fd42a4540f639534ce9b | [
"MIT"
] | 1 | 2021-04-21T16:49:23.000Z | 2021-04-21T16:49:23.000Z | pyrcc/__init__.py | jtpils/pyrcc | 3dfb9721a41f5c253690fd42a4540f639534ce9b | [
"MIT"
] | 30 | 2017-09-18T01:18:13.000Z | 2020-05-05T05:43:54.000Z | from .rcc import RccCluster
| 14 | 27 | 0.821429 | from .rcc import RccCluster
| true | true |
1c4ac916ab8d1749743c2e814fb947997a9a280c | 4,709 | py | Python | utils.py | NateEaton/igrill | 30caa7af3d6861ecfcb004fb34dfc7c316ff7b74 | [
"MIT"
] | null | null | null | utils.py | NateEaton/igrill | 30caa7af3d6861ecfcb004fb34dfc7c316ff7b74 | [
"MIT"
] | null | null | null | utils.py | NateEaton/igrill | 30caa7af3d6861ecfcb004fb34dfc7c316ff7b74 | [
"MIT"
] | null | null | null | from builtins import range
from config import strip_config
from igrill import IGrillMiniPeripheral, IGrillV2Peripheral, IGrillV3Peripheral, Pulse2000Peripheral, DeviceThread
import logging
import paho.mqtt.client as mqtt
config_requirements = {
'specs': {
'required_entries': {'devices': list, 'mqtt': dict},
},
'children': {
'devices': {
'specs': {
'required_entries': {'name': str, 'type': str, 'address': str, 'topic': str, 'interval': int},
'optional_entries': {'publish_missing_probes': bool, 'missing_probe_value': str},
'list_type': dict
}
},
'mqtt': {
'specs': {
'required_entries': {'host': str},
'optional_entries': {'port': int,
'keepalive': int,
'auth': dict,
'tls': dict}
},
'children': {
'auth': {
'specs': {
'required_entries': {'username': str},
'optional_entries': {'password': str}
}
},
'tls': {
'specs': {
'optional_entries': {'ca_certs': str,
'certfile': str,
'keyfile': str,
'cert_reqs': str,
'tls_version': str,
'ciphers': str}
}
}
}
}
}
}
config_defaults = {
'mqtt': {
'host': 'localhost'
}
}
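# Illustrative config satisfying the schema above (all values are hypothetical):
#
#   devices:
#     - name: grill
#       type: igrill_v2
#       address: "AA:BB:CC:DD:EE:FF"
#       topic: bbq
#       interval: 15
#   mqtt:
#     host: localhost
#     port: 1883
#     auth:
#       username: igrill
#       password: secret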
def log_setup(log_level, logfile):
"""Setup application logging"""
numeric_level = logging.getLevelName(log_level.upper())
if not isinstance(numeric_level, int):
raise TypeError("Invalid log level: {0}".format(log_level))
if logfile != '':
logging.info("Logging redirected to: ".format(logfile))
# Need to replace the current handler on the root logger:
file_handler = logging.FileHandler(logfile, 'a')
formatter = logging.Formatter('%(asctime)s %(threadName)s %(levelname)s: %(message)s')
file_handler.setFormatter(formatter)
log = logging.getLogger() # root logger
for handler in log.handlers: # remove all old handlers
log.removeHandler(handler)
log.addHandler(file_handler)
else:
logging.basicConfig(format='%(asctime)s %(threadName)s %(levelname)s: %(message)s')
logging.getLogger().setLevel(numeric_level)
logging.info("log_level set to: {0}".format(log_level))
def mqtt_init(mqtt_config):
"""Setup mqtt connection"""
mqtt_client = mqtt.Client()
if 'auth' in mqtt_config:
auth = mqtt_config['auth']
mqtt_client.username_pw_set(**auth)
if 'tls' in mqtt_config:
if mqtt_config['tls']:
tls_config = mqtt_config['tls']
mqtt_client.tls_set(**tls_config)
else:
mqtt_client.tls_set()
mqtt_client.connect(**strip_config(mqtt_config, ['host', 'port', 'keepalive']))
return mqtt_client
def publish(temperatures, battery, heating_element, device_state, client, base_topic, device_name):
for i in range(1, 5):
if temperatures[i]:
client.publish("{0}/{1}/probe{2}".format(base_topic, device_name, i), temperatures[i])
if battery:
client.publish("{0}/{1}/battery".format(base_topic, device_name), battery, retain=True)
if heating_element:
client.publish("{0}/{1}/heating_element".format(base_topic, device_name), heating_element)
if device_state:
logging.debug("Publish device_state as {}".format(device_state))
client.publish("{0}/{1}/device_state".format(base_topic, device_name), device_state, retain=True)
def get_devices(device_config):
if device_config is None:
logging.warn('No devices in config')
return {}
device_types = {'igrill_mini': IGrillMiniPeripheral,
'igrill_v2': IGrillV2Peripheral,
'igrill_v3': IGrillV3Peripheral,
'pulse_2000': Pulse2000Peripheral}
return [device_types[d['type']](**strip_config(d, ['address', 'name'])) for d in device_config]
def get_device_threads(device_config, mqtt_config, run_event):
if device_config is None:
logging.warn('No devices in config')
return {}
return [DeviceThread(ind, mqtt_config, run_event, **d) for ind, d in
enumerate(device_config)]
| 35.141791 | 114 | 0.551922 | from builtins import range
from config import strip_config
from igrill import IGrillMiniPeripheral, IGrillV2Peripheral, IGrillV3Peripheral, Pulse2000Peripheral, DeviceThread
import logging
import paho.mqtt.client as mqtt
config_requirements = {
'specs': {
'required_entries': {'devices': list, 'mqtt': dict},
},
'children': {
'devices': {
'specs': {
'required_entries': {'name': str, 'type': str, 'address': str, 'topic': str, 'interval': int},
'optional_entries': {'publish_missing_probes': bool, 'missing_probe_value': str},
'list_type': dict
}
},
'mqtt': {
'specs': {
'required_entries': {'host': str},
'optional_entries': {'port': int,
'keepalive': int,
'auth': dict,
'tls': dict}
},
'children': {
'auth': {
'specs': {
'required_entries': {'username': str},
'optional_entries': {'password': str}
}
},
'tls': {
'specs': {
'optional_entries': {'ca_certs': str,
'certfile': str,
'keyfile': str,
'cert_reqs': str,
'tls_version': str,
'ciphers': str}
}
}
}
}
}
}
config_defaults = {
'mqtt': {
'host': 'localhost'
}
}
def log_setup(log_level, logfile):
numeric_level = logging.getLevelName(log_level.upper())
if not isinstance(numeric_level, int):
raise TypeError("Invalid log level: {0}".format(log_level))
if logfile != '':
logging.info("Logging redirected to: ".format(logfile))
file_handler = logging.FileHandler(logfile, 'a')
formatter = logging.Formatter('%(asctime)s %(threadName)s %(levelname)s: %(message)s')
file_handler.setFormatter(formatter)
        log = logging.getLogger()
        for handler in log.handlers:
            log.removeHandler(handler)
log.addHandler(file_handler)
else:
logging.basicConfig(format='%(asctime)s %(threadName)s %(levelname)s: %(message)s')
logging.getLogger().setLevel(numeric_level)
logging.info("log_level set to: {0}".format(log_level))
def mqtt_init(mqtt_config):
mqtt_client = mqtt.Client()
if 'auth' in mqtt_config:
auth = mqtt_config['auth']
mqtt_client.username_pw_set(**auth)
if 'tls' in mqtt_config:
if mqtt_config['tls']:
tls_config = mqtt_config['tls']
mqtt_client.tls_set(**tls_config)
else:
mqtt_client.tls_set()
mqtt_client.connect(**strip_config(mqtt_config, ['host', 'port', 'keepalive']))
return mqtt_client
def publish(temperatures, battery, heating_element, device_state, client, base_topic, device_name):
for i in range(1, 5):
if temperatures[i]:
client.publish("{0}/{1}/probe{2}".format(base_topic, device_name, i), temperatures[i])
if battery:
client.publish("{0}/{1}/battery".format(base_topic, device_name), battery, retain=True)
if heating_element:
client.publish("{0}/{1}/heating_element".format(base_topic, device_name), heating_element)
if device_state:
logging.debug("Publish device_state as {}".format(device_state))
client.publish("{0}/{1}/device_state".format(base_topic, device_name), device_state, retain=True)
def get_devices(device_config):
if device_config is None:
logging.warn('No devices in config')
return {}
device_types = {'igrill_mini': IGrillMiniPeripheral,
'igrill_v2': IGrillV2Peripheral,
'igrill_v3': IGrillV3Peripheral,
'pulse_2000': Pulse2000Peripheral}
return [device_types[d['type']](**strip_config(d, ['address', 'name'])) for d in device_config]
def get_device_threads(device_config, mqtt_config, run_event):
if device_config is None:
logging.warn('No devices in config')
return {}
return [DeviceThread(ind, mqtt_config, run_event, **d) for ind, d in
enumerate(device_config)]
| true | true |
1c4ac9638d00c55a6fc59df6fd2a50f24ece27ef | 563 | py | Python | pythonExercicios/ex115/sistema.py | Yhago-Carvalho/CursoPython | 343ccabb1a61e16c6078de9672c78c56deed2589 | [
"MIT"
] | null | null | null | pythonExercicios/ex115/sistema.py | Yhago-Carvalho/CursoPython | 343ccabb1a61e16c6078de9672c78c56deed2589 | [
"MIT"
] | null | null | null | pythonExercicios/ex115/sistema.py | Yhago-Carvalho/CursoPython | 343ccabb1a61e16c6078de9672c78c56deed2589 | [
"MIT"
] | null | null | null | from lib.arquivo import *
from lib.interface import *
if not arquivo_existe('cursoemvideo.txt'):
criar_arquivo('cursoemvideo.txt')
while True:
o = menu('Sua opção: ')
if o == 1:
        # Show the registered people
ler_arquivo('cursoemvideo.txt')
elif o == 2:
        # Register a new person
titulo('NOVO CADASTRO')
nome = input('Nome: ')
idade = leiaInt('Idade: ')
cadastrar('cursoemvideo.txt', nome, idade)
else:
        # Exit the system
titulo('SISTEMA FINALIZADO... ATÉ LOGO!')
break | 26.809524 | 50 | 0.598579 | from lib.arquivo import *
from lib.interface import *
if not arquivo_existe('cursoemvideo.txt'):
criar_arquivo('cursoemvideo.txt')
while True:
o = menu('Sua opção: ')
if o == 1:
ler_arquivo('cursoemvideo.txt')
elif o == 2:
titulo('NOVO CADASTRO')
nome = input('Nome: ')
idade = leiaInt('Idade: ')
cadastrar('cursoemvideo.txt', nome, idade)
else:
titulo('SISTEMA FINALIZADO... ATÉ LOGO!')
break | true | true |
1c4ac9be98e27c1ea3752c2bd2d934194b92a207 | 1,492 | py | Python | tweepy-bots/followerextract.py | kassuahun/Twitterprj | 23c7272201a0a62f2aaa9d1081a5f1d74b66b144 | [
"Apache-2.0"
] | null | null | null | tweepy-bots/followerextract.py | kassuahun/Twitterprj | 23c7272201a0a62f2aaa9d1081a5f1d74b66b144 | [
"Apache-2.0"
] | null | null | null | tweepy-bots/followerextract.py | kassuahun/Twitterprj | 23c7272201a0a62f2aaa9d1081a5f1d74b66b144 | [
"Apache-2.0"
] | null | null | null | import tweepy
import time
import sys
from datetime import datetime
from config import create_api, create_api_List, create_api_test
from limits import limits
import random
import os
import utils
import logging
import atexit
api = create_api()
# the ID of the user
id = 1168167671151628290
# fetching the user
user = api.get_user(id)
print("USER NAME = ", user.screen_name)
keywords=["Consulate", "consulate", "Embassy", "embassy", "Ambassador"]
potential_deplomats = []
ids=[]
def filewriter(fname, deplomats):
print("writing deplomats ---------")
f_name = os.path.dirname(os.path.realpath(__file__)) + os.sep + fname
    with open(f_name, 'w') as filetowrite:
for deplomat in deplomats:
filetowrite.write(deplomat)
for page in tweepy.Cursor(api.followers_ids, screen_name="AbiyAhmedAli").pages():
ids.extend(page)
for id in page:
try:
user = api.get_user(id)
if "Ethiopia" in user.description:
for key in keywords:
if key in user.description:
potential_deplomats.append(id)
potential_deplomats.append([user.screen_name, user.description])
print("Deplomat added ", user.screen_name, user.description)
except tweepy.TweepError as e:
print(e.reason)
continue
time.sleep(60)
print(len(ids))
atexit.register(utils.exit_handler,potential_deplomats,"deplomats.txt")
| 26.642857 | 88 | 0.654155 | import tweepy
import time
import sys
from datetime import datetime
from config import create_api, create_api_List, create_api_test
from limits import limits
import random
import os
import utils
import logging
import atexit
api = create_api()
id = 1168167671151628290
user = api.get_user(id)
print("USER NAME = ", user.screen_name)
keywords=["Consulate", "consulate", "Embassy", "embassy", "Ambassador"]
potential_deplomats = []
ids=[]
def filewriter(fname, deplomats):
print("writing deplomats ---------")
f_name = os.path.dirname(os.path.realpath(__file__)) + os.sep + fname
    with open(f_name, 'w') as filetowrite:
for deplomat in deplomats:
filetowrite.write(deplomat)
for page in tweepy.Cursor(api.followers_ids, screen_name="AbiyAhmedAli").pages():
ids.extend(page)
for id in page:
try:
user = api.get_user(id)
if "Ethiopia" in user.description:
for key in keywords:
if key in user.description:
potential_deplomats.append(id)
potential_deplomats.append([user.screen_name, user.description])
print("Deplomat added ", user.screen_name, user.description)
except tweepy.TweepError as e:
print(e.reason)
continue
time.sleep(60)
print(len(ids))
atexit.register(utils.exit_handler,potential_deplomats,"deplomats.txt")
| true | true |
1c4aca9d8963bb2cd9e31b728c9373a6ac656cdd | 16,441 | py | Python | dovetail/utils/dovetail_utils.py | xudan16/dovetail | 6230a0d2c47c32d970f3197021efee8f9669282b | [
"Apache-2.0"
] | 3 | 2018-11-14T14:42:45.000Z | 2018-12-20T21:56:48.000Z | dovetail/utils/dovetail_utils.py | xudan16/dovetail | 6230a0d2c47c32d970f3197021efee8f9669282b | [
"Apache-2.0"
] | 4 | 2021-03-25T21:57:27.000Z | 2021-09-23T23:21:52.000Z | dovetail/utils/dovetail_utils.py | xudan16/dovetail | 6230a0d2c47c32d970f3197021efee8f9669282b | [
"Apache-2.0"
] | 20 | 2016-11-11T06:38:14.000Z | 2020-10-25T16:48:12.000Z | #!/usr/bin/env python
#
# Copyright (c) 2018 [email protected] and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
from __future__ import print_function
import sys
import os
import re
import requests
import subprocess
from collections import Mapping, Set, Sequence
import json
from datetime import datetime
from distutils.version import LooseVersion
import yaml
import python_hosts
import docker
from docker.types import Mount
from dovetail import constants
from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
from dovetail.utils.openstack_utils import OS_Utils
def exec_log(verbose, logger, msg, level, flush=False):
if not verbose:
return
if logger:
if level == 'info':
logger.info(msg)
elif level == 'error':
logger.error(msg)
elif level == 'debug':
logger.debug(msg)
else:
print(msg)
if flush:
sys.stdout.flush()
def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
exec_msg_on=True, err_msg='', verbose=True,
progress_bar=False):
msg_err = ("The command '%s' failed." % cmd) if not err_msg else err_msg
msg_exec = ("Executing command: '%s'" % cmd)
level = 'info' if info else 'debug'
if exec_msg_on:
exec_log(verbose, logger, msg_exec, level)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = ''
if progress_bar:
count = 1
DEBUG = os.getenv('DEBUG')
for line in iter(p.stdout.readline, b''):
exec_log(verbose, logger, line.strip().decode('unicode-escape'),
level, True)
stdout += str(line)
if progress_bar and (DEBUG is None or DEBUG.lower() != 'true'):
show_progress_bar(count)
count += 1
stdout = stdout.strip()
returncode = p.wait()
p.stdout.close()
if returncode != 0:
exec_log(verbose, logger, msg_err, 'error')
if exit_on_error:
sys.exit(1)
return returncode, stdout
# walkthrough the object, yield path and value
# dual python 2/3 compatibility, inspired by the "six" library
string_types = (str, 'unicode') if str is bytes else (str, bytes)
# items = lambda mapping: getattr(mapping, 'items', mapping.items)()
def items(mapping):
return getattr(mapping, 'items', mapping.items)()
def objwalk(obj, path=(), memo=None):
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = items
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj,
string_types):
iterator = enumerate
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
yield path, obj
def get_obj_by_path(obj, dst_path):
for path, obj in objwalk(obj):
if path == dst_path:
return obj
return None
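# Example (hypothetical object): for obj = {'a': [{'b': 1}]}, objwalk(obj)
# yields (('a', 0, 'b'), 1), so get_obj_by_path(obj, ('a', 0, 'b')) returns 1.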
def source_env(env_file):
with open(env_file, 'r') as f:
lines = f.readlines()
for line in lines:
if line.lstrip().startswith('export'):
for match in re.findall(r"export (.*)=(.*)", line):
match = (match[0].strip('\"'), match[1].strip('\"'))
match = (match[0].strip('\''), match[1].strip('\''))
os.environ.update({match[0]: match[1]})
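# Example (hypothetical openrc line): a line such as
#   export OS_AUTH_URL="https://keystone.example.com:5000/v3"
# leaves os.environ['OS_AUTH_URL'] set to 'https://keystone.example.com:5000/v3'.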
def check_https_enabled(logger=None):
logger.debug('Checking if https enabled or not...')
os_auth_url = os.getenv('OS_AUTH_URL')
if os_auth_url.startswith('https'):
logger.debug('https is enabled')
return True
logger.debug('https is not enabled')
return False
def get_duration(start_date, stop_date, logger):
fmt = '%Y-%m-%d %H:%M:%S'
try:
datetime_start = datetime.strptime(start_date, fmt)
datetime_stop = datetime.strptime(stop_date, fmt)
delta = (datetime_stop - datetime_start).seconds
        res = '%sm%ss' % (delta // 60, delta % 60)
return res
except ValueError as e:
logger.exception('ValueError: {}'.format(e))
return None
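# Example (hypothetical timestamps): get_duration('2019-01-01 10:00:00',
# '2019-01-01 10:01:30', logger) returns '1m30s'.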
def show_progress_bar(length):
max_len = 50
length %= max_len
sys.stdout.write('Running ' + ' ' * max_len + '\r')
sys.stdout.flush()
sys.stdout.write('Running ' + '.' * length + '\r')
sys.stdout.flush()
def check_docker_version(logger=None):
client = docker.from_env()
server_ver = None
try:
server_ver = client.version()['Version']
except Exception:
logger.error('Failed to get Docker server version')
if server_ver and (LooseVersion(server_ver) >= LooseVersion('1.12.3')):
logger.debug('Docker server version: {}'.format(server_ver))
else:
logger.error("Don't support this Docker server version. "
"Docker server should be updated to at least 1.12.3.")
def add_hosts_info(ip, hostnames):
hosts = python_hosts.Hosts(path='/etc/hosts')
filtered_hostnames = [hostname for hostname in hostnames if hostname]
if not ip or not filtered_hostnames:
return
new_entry = python_hosts.HostsEntry(entry_type='ipv4',
address=ip,
names=filtered_hostnames)
hosts.add([new_entry])
hosts.write()
def get_hardware_info(logger=None):
pod_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['pod_file'])
logger.info('Get hardware info of all nodes list in file {} ...'
.format(pod_file))
result_dir = dt_cfg.dovetail_config['result_dir']
info_file_path = os.path.join(result_dir, 'sut_hardware_info')
all_info_file = os.path.join(result_dir, 'all_hosts_info.json')
inventory_file = os.path.join(result_dir, 'inventory.ini')
if not get_inventory_file(pod_file, inventory_file, logger):
logger.error('Failed to get SUT hardware info.')
return None
ret, msg = exec_cmd('cd {} && ansible all -m setup -i {} --tree {}'
.format(constants.USERCONF_PATH, inventory_file,
info_file_path), verbose=False)
if not os.path.exists(info_file_path) or ret != 0:
logger.error('Failed to get SUT hardware info.')
return None
if not combine_files(info_file_path, all_info_file, logger):
logger.error('Failed to get all hardware info.')
return None
logger.info('Hardware info of all nodes are stored in file {}.'
.format(all_info_file))
return all_info_file
def get_inventory_file(pod_file, inventory_file, logger=None):
if not os.path.isfile(pod_file):
logger.error("File {} doesn't exist.".format(pod_file))
return False
try:
with open(pod_file, 'r') as f, open(inventory_file, 'w') as out_f:
pod_info = yaml.safe_load(f)
for host in pod_info['nodes']:
host_info = ('{} ansible_host={} ansible_user={}'
.format(host['name'], host['ip'], host['user']))
if 'password' in host.keys():
host_info += (' ansible_ssh_pass={}\n'
.format(host['password']))
elif 'key_filename' in host.keys():
key = os.path.join(dt_cfg.dovetail_config['config_dir'],
'id_rsa')
host_info += (' ansible_ssh_private_key_file={}\n'
.format(key))
else:
logger.error('No password or key_filename in file {}.'
.format(pod_file))
return False
out_f.write(host_info)
logger.debug('Ansible inventory file is {}.'.format(inventory_file))
return True
except KeyError as e:
logger.exception('KeyError {}.'.format(e))
return False
except Exception:
logger.exception('Failed to read file {}.'.format(pod_file))
return False
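# Illustrative sketch (hypothetical node entry): a pod_file node such as
#   {'name': 'node1', 'ip': '10.1.0.50', 'user': 'root', 'password': 'root'}
# is written to the inventory as
#   node1 ansible_host=10.1.0.50 ansible_user=root ansible_ssh_pass=root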
def combine_files(file_path, result_file, logger=None):
all_info = {}
info_files = os.listdir(file_path)
for info_file in info_files:
try:
absolute_file_path = os.path.join(file_path, info_file)
with open(absolute_file_path, 'r') as f:
all_info[info_file] = json.load(f)
except Exception:
logger.error('Failed to read file {}.'.format(absolute_file_path))
return None
try:
with open(result_file, 'w') as f:
f.write(json.dumps(all_info))
except Exception:
logger.exception('Failed to write file {}.'.format(result_file))
return None
return result_file
def get_openstack_endpoint(logger=None):
https_enabled = check_https_enabled(logger)
insecure = os.getenv('OS_INSECURE')
if https_enabled and insecure and insecure.lower() == 'true':
os_utils = OS_Utils(verify=False)
else:
os_utils = OS_Utils()
res_endpoints, msg_endpoints = os_utils.search_endpoints()
if not res_endpoints:
logger.error('Failed to list endpoints. Exception message, {}'
.format(msg_endpoints))
return None
endpoints_info = []
for item in msg_endpoints:
endpoint = {'URL': item['url'], 'Enabled': item['enabled']}
res_services, msg_services = os_utils.search_services(
service_id=item['service_id'])
if not res_services:
logger.error('Failed to list services. Exception message, {}'
.format(msg_services))
return None
endpoint['Service Type'] = msg_services[0]['service_type']
endpoint['Service Name'] = msg_services[0]['name']
endpoints_info.append(endpoint)
result_file = os.path.join(dt_cfg.dovetail_config['result_dir'],
'endpoint_info.json')
try:
with open(result_file, 'w') as f:
json.dump(endpoints_info, f)
logger.debug('Record all endpoint info into file {}.'
.format(result_file))
return endpoints_info
except Exception:
logger.exception('Failed to write endpoint info into file.')
return None
def check_cacert_file(cacert, logger=None):
if not os.path.isfile(cacert):
logger.error('OS_CACERT is {}, but the file does not exist.'
.format(cacert))
return False
if not dt_cfg.dovetail_config['config_dir'] == os.path.dirname(cacert):
logger.error('Credential file must be put under {}, '
'which can be mounted into other container.'
.format(dt_cfg.dovetail_config['config_dir']))
return False
return True
def get_hosts_info(logger=None):
hosts_config = {}
hosts_config_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
'hosts.yaml')
if not os.path.isfile(hosts_config_file):
logger.warn('There is no hosts file {}. This may cause some issues '
'with domain name resolution.'.format(hosts_config_file))
return hosts_config
with open(hosts_config_file) as f:
hosts_yaml = yaml.safe_load(f)
if not hosts_yaml:
logger.debug('File {} is empty.'.format(hosts_config_file))
return hosts_config
hosts_info = hosts_yaml.get('hosts_info', None)
if not hosts_info:
logger.error('There is no key hosts_info in file {}'
.format(hosts_config_file))
return hosts_config
for ip, hostnames in hosts_info.items():
if not hostnames:
continue
add_hosts_info(ip, hostnames)
names_str = ' '.join(hostname for hostname in hostnames
if hostname)
if not names_str:
continue
hosts_config[names_str] = ip
logger.debug('Get hosts info {}:{}.'.format(ip, names_str))
return hosts_config
def read_yaml_file(file_path, logger=None):
if not os.path.isfile(file_path):
logger.error("File {} doesn't exist.".format(file_path))
return None
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
return content
except Exception as e:
logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
def read_plain_file(file_path, logger=None):
if not os.path.isfile(file_path):
logger.error("File {} doesn't exist.".format(file_path))
return None
try:
with open(file_path, 'r') as f:
content = f.read()
return content
except Exception as e:
logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
def get_value_from_dict(key_path, input_dict):
"""
Returns the value of a key in input_dict
key_path must be given in string format with dots
Example: result.dir
"""
if not isinstance(key_path, str) or not isinstance(input_dict, dict):
return None
for key in key_path.split('.'):
input_dict = input_dict.get(key)
if not input_dict:
return None
return input_dict
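# Example (hypothetical data): get_value_from_dict('report.build_tag',
# {'report': {'build_tag': 'daily-123'}}) returns 'daily-123'; a missing key
# anywhere along the dotted path returns None.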
def get_openstack_info(logger):
"""
When the sut is an OpenStack deployment, its software and hardware info
are needed.
Software info is the endpoint list.
Hardware info is every node's cpu, disk ...
"""
openrc = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['env_file'])
if not os.path.isfile(openrc):
logger.error('File {} does not exist.'.format(openrc))
return
source_env(openrc)
get_hosts_info(logger)
get_openstack_endpoint(logger)
get_hardware_info(logger)
def push_results_to_db(case_name, details, start_date, stop_date, logger):
"""
Push results to OPNFV TestAPI DB when running with OPNFV CI jobs.
All results can be filtered with TestAPI.
http://testresults.opnfv.org/test/#/results
"""
try:
url = os.getenv('TEST_DB_URL')
data = {'project_name': 'dovetail', 'case_name': case_name,
'details': details, 'start_date': start_date,
'stop_date': stop_date}
data['criteria'] = details['criteria'] if details else 'FAIL'
data['installer'] = os.getenv('INSTALLER_TYPE')
data['scenario'] = os.getenv('DEPLOY_SCENARIO')
data['pod_name'] = os.getenv('NODE_NAME')
data['build_tag'] = os.getenv('BUILD_TAG')
data['version'] = os.getenv('VERSION')
req = requests.post(url, data=json.dumps(data, sort_keys=True),
headers={'Content-Type': 'application/json'})
req.raise_for_status()
logger.debug('The results were successfully pushed to DB.')
return True
except Exception:
logger.exception('The results cannot be pushed to DB.')
return False
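# Illustrative payload (all values hypothetical) posted to TEST_DB_URL:
#   {"project_name": "dovetail", "case_name": "dovetail.example.tc001",
#    "criteria": "PASS", "installer": "fuel", "scenario": "os-nosdn-nofeature-ha",
#    "pod_name": "pod1", "build_tag": "daily-123", "version": "2018.09",
#    "details": {...}, "start_date": "...", "stop_date": "..."}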
def get_mount_list(project_cfg):
mount_list = []
mounts = get_value_from_dict('mounts', project_cfg)
for mount in mounts:
if mount:
param_dict = {}
for param in mount.split(','):
key_word = param.split('=')
if len(key_word) != 2:
return None, 'Error mount {}.'.format(mount)
param_dict[key_word[0]] = key_word[1]
try:
mount_list.append(Mount(target=param_dict['target'],
source=param_dict['source'],
type='bind'))
except Exception as e:
return None, e
return mount_list, 'Successfully to get mount list.'
| 35.819172 | 78 | 0.599051 |
from __future__ import print_function
import sys
import os
import re
import requests
import subprocess
from collections import Mapping, Set, Sequence
import json
from datetime import datetime
from distutils.version import LooseVersion
import yaml
import python_hosts
import docker
from docker.types import Mount
from dovetail import constants
from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
from dovetail.utils.openstack_utils import OS_Utils
def exec_log(verbose, logger, msg, level, flush=False):
if not verbose:
return
if logger:
if level == 'info':
logger.info(msg)
elif level == 'error':
logger.error(msg)
elif level == 'debug':
logger.debug(msg)
else:
print(msg)
if flush:
sys.stdout.flush()
def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
exec_msg_on=True, err_msg='', verbose=True,
progress_bar=False):
msg_err = ("The command '%s' failed." % cmd) if not err_msg else err_msg
msg_exec = ("Executing command: '%s'" % cmd)
level = 'info' if info else 'debug'
if exec_msg_on:
exec_log(verbose, logger, msg_exec, level)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = ''
if progress_bar:
count = 1
DEBUG = os.getenv('DEBUG')
for line in iter(p.stdout.readline, b''):
exec_log(verbose, logger, line.strip().decode('unicode-escape'),
level, True)
stdout += str(line)
if progress_bar and (DEBUG is None or DEBUG.lower() != 'true'):
show_progress_bar(count)
count += 1
stdout = stdout.strip()
returncode = p.wait()
p.stdout.close()
if returncode != 0:
exec_log(verbose, logger, msg_err, 'error')
if exit_on_error:
sys.exit(1)
return returncode, stdout
string_types = (str, 'unicode') if str is bytes else (str, bytes)
def items(mapping):
return getattr(mapping, 'items', mapping.items)()
def objwalk(obj, path=(), memo=None):
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = items
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj,
string_types):
iterator = enumerate
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
yield path, obj
def get_obj_by_path(obj, dst_path):
for path, obj in objwalk(obj):
if path == dst_path:
return obj
return None
def source_env(env_file):
with open(env_file, 'r') as f:
lines = f.readlines()
for line in lines:
if line.lstrip().startswith('export'):
for match in re.findall(r"export (.*)=(.*)", line):
match = (match[0].strip('\"'), match[1].strip('\"'))
match = (match[0].strip('\''), match[1].strip('\''))
os.environ.update({match[0]: match[1]})
def check_https_enabled(logger=None):
logger.debug('Checking if https enabled or not...')
os_auth_url = os.getenv('OS_AUTH_URL')
if os_auth_url.startswith('https'):
logger.debug('https is enabled')
return True
logger.debug('https is not enabled')
return False
def get_duration(start_date, stop_date, logger):
fmt = '%Y-%m-%d %H:%M:%S'
try:
datetime_start = datetime.strptime(start_date, fmt)
datetime_stop = datetime.strptime(stop_date, fmt)
delta = (datetime_stop - datetime_start).seconds
        res = '%sm%ss' % (delta // 60, delta % 60)
return res
except ValueError as e:
logger.exception('ValueError: {}'.format(e))
return None
def show_progress_bar(length):
max_len = 50
length %= max_len
sys.stdout.write('Running ' + ' ' * max_len + '\r')
sys.stdout.flush()
sys.stdout.write('Running ' + '.' * length + '\r')
sys.stdout.flush()
def check_docker_version(logger=None):
client = docker.from_env()
server_ver = None
try:
server_ver = client.version()['Version']
except Exception:
logger.error('Failed to get Docker server version')
if server_ver and (LooseVersion(server_ver) >= LooseVersion('1.12.3')):
logger.debug('Docker server version: {}'.format(server_ver))
else:
logger.error("Don't support this Docker server version. "
"Docker server should be updated to at least 1.12.3.")
def add_hosts_info(ip, hostnames):
hosts = python_hosts.Hosts(path='/etc/hosts')
filtered_hostnames = [hostname for hostname in hostnames if hostname]
if not ip or not filtered_hostnames:
return
new_entry = python_hosts.HostsEntry(entry_type='ipv4',
address=ip,
names=filtered_hostnames)
hosts.add([new_entry])
hosts.write()
def get_hardware_info(logger=None):
pod_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['pod_file'])
logger.info('Get hardware info of all nodes list in file {} ...'
.format(pod_file))
result_dir = dt_cfg.dovetail_config['result_dir']
info_file_path = os.path.join(result_dir, 'sut_hardware_info')
all_info_file = os.path.join(result_dir, 'all_hosts_info.json')
inventory_file = os.path.join(result_dir, 'inventory.ini')
if not get_inventory_file(pod_file, inventory_file, logger):
logger.error('Failed to get SUT hardware info.')
return None
ret, msg = exec_cmd('cd {} && ansible all -m setup -i {} --tree {}'
.format(constants.USERCONF_PATH, inventory_file,
info_file_path), verbose=False)
if not os.path.exists(info_file_path) or ret != 0:
logger.error('Failed to get SUT hardware info.')
return None
if not combine_files(info_file_path, all_info_file, logger):
logger.error('Failed to get all hardware info.')
return None
logger.info('Hardware info of all nodes are stored in file {}.'
.format(all_info_file))
return all_info_file
def get_inventory_file(pod_file, inventory_file, logger=None):
if not os.path.isfile(pod_file):
logger.error("File {} doesn't exist.".format(pod_file))
return False
try:
with open(pod_file, 'r') as f, open(inventory_file, 'w') as out_f:
pod_info = yaml.safe_load(f)
for host in pod_info['nodes']:
host_info = ('{} ansible_host={} ansible_user={}'
.format(host['name'], host['ip'], host['user']))
if 'password' in host.keys():
host_info += (' ansible_ssh_pass={}\n'
.format(host['password']))
elif 'key_filename' in host.keys():
key = os.path.join(dt_cfg.dovetail_config['config_dir'],
'id_rsa')
host_info += (' ansible_ssh_private_key_file={}\n'
.format(key))
else:
logger.error('No password or key_filename in file {}.'
.format(pod_file))
return False
out_f.write(host_info)
logger.debug('Ansible inventory file is {}.'.format(inventory_file))
return True
except KeyError as e:
logger.exception('KeyError {}.'.format(e))
return False
except Exception:
logger.exception('Failed to read file {}.'.format(pod_file))
return False
def combine_files(file_path, result_file, logger=None):
all_info = {}
info_files = os.listdir(file_path)
for info_file in info_files:
try:
absolute_file_path = os.path.join(file_path, info_file)
with open(absolute_file_path, 'r') as f:
all_info[info_file] = json.load(f)
except Exception:
logger.error('Failed to read file {}.'.format(absolute_file_path))
return None
try:
with open(result_file, 'w') as f:
f.write(json.dumps(all_info))
except Exception:
logger.exception('Failed to write file {}.'.format(result_file))
return None
return result_file
def get_openstack_endpoint(logger=None):
https_enabled = check_https_enabled(logger)
insecure = os.getenv('OS_INSECURE')
if https_enabled and insecure and insecure.lower() == 'true':
os_utils = OS_Utils(verify=False)
else:
os_utils = OS_Utils()
res_endpoints, msg_endpoints = os_utils.search_endpoints()
if not res_endpoints:
logger.error('Failed to list endpoints. Exception message, {}'
.format(msg_endpoints))
return None
endpoints_info = []
for item in msg_endpoints:
endpoint = {'URL': item['url'], 'Enabled': item['enabled']}
res_services, msg_services = os_utils.search_services(
service_id=item['service_id'])
if not res_services:
logger.error('Failed to list services. Exception message, {}'
.format(msg_services))
return None
endpoint['Service Type'] = msg_services[0]['service_type']
endpoint['Service Name'] = msg_services[0]['name']
endpoints_info.append(endpoint)
result_file = os.path.join(dt_cfg.dovetail_config['result_dir'],
'endpoint_info.json')
try:
with open(result_file, 'w') as f:
json.dump(endpoints_info, f)
logger.debug('Record all endpoint info into file {}.'
.format(result_file))
return endpoints_info
except Exception:
logger.exception('Failed to write endpoint info into file.')
return None
def check_cacert_file(cacert, logger=None):
if not os.path.isfile(cacert):
logger.error('OS_CACERT is {}, but the file does not exist.'
.format(cacert))
return False
if not dt_cfg.dovetail_config['config_dir'] == os.path.dirname(cacert):
logger.error('Credential file must be put under {}, '
'which can be mounted into other container.'
.format(dt_cfg.dovetail_config['config_dir']))
return False
return True
def get_hosts_info(logger=None):
hosts_config = {}
hosts_config_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
'hosts.yaml')
if not os.path.isfile(hosts_config_file):
logger.warn('There is no hosts file {}. This may cause some issues '
'with domain name resolution.'.format(hosts_config_file))
return hosts_config
with open(hosts_config_file) as f:
hosts_yaml = yaml.safe_load(f)
if not hosts_yaml:
logger.debug('File {} is empty.'.format(hosts_config_file))
return hosts_config
hosts_info = hosts_yaml.get('hosts_info', None)
if not hosts_info:
logger.error('There is no key hosts_info in file {}'
.format(hosts_config_file))
return hosts_config
for ip, hostnames in hosts_info.items():
if not hostnames:
continue
add_hosts_info(ip, hostnames)
names_str = ' '.join(hostname for hostname in hostnames
if hostname)
if not names_str:
continue
hosts_config[names_str] = ip
logger.debug('Get hosts info {}:{}.'.format(ip, names_str))
return hosts_config
def read_yaml_file(file_path, logger=None):
if not os.path.isfile(file_path):
logger.error("File {} doesn't exist.".format(file_path))
return None
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
return content
except Exception as e:
logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
def read_plain_file(file_path, logger=None):
if not os.path.isfile(file_path):
logger.error("File {} doesn't exist.".format(file_path))
return None
try:
with open(file_path, 'r') as f:
content = f.read()
return content
except Exception as e:
logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
def get_value_from_dict(key_path, input_dict):
if not isinstance(key_path, str) or not isinstance(input_dict, dict):
return None
for key in key_path.split('.'):
input_dict = input_dict.get(key)
if not input_dict:
return None
return input_dict
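# Illustrative note (added; the key path and dict below are made-up examples):
# get_value_from_dict walks a dotted key path, so
#   get_value_from_dict('general.image_name', {'general': {'image_name': 'cirros'}})
# returns 'cirros', while any missing key along the path yields None.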
def get_openstack_info(logger):
openrc = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['env_file'])
if not os.path.isfile(openrc):
logger.error('File {} does not exist.'.format(openrc))
return
source_env(openrc)
get_hosts_info(logger)
get_openstack_endpoint(logger)
get_hardware_info(logger)
def push_results_to_db(case_name, details, start_date, stop_date, logger):
try:
url = os.getenv('TEST_DB_URL')
data = {'project_name': 'dovetail', 'case_name': case_name,
'details': details, 'start_date': start_date,
'stop_date': stop_date}
data['criteria'] = details['criteria'] if details else 'FAIL'
data['installer'] = os.getenv('INSTALLER_TYPE')
data['scenario'] = os.getenv('DEPLOY_SCENARIO')
data['pod_name'] = os.getenv('NODE_NAME')
data['build_tag'] = os.getenv('BUILD_TAG')
data['version'] = os.getenv('VERSION')
req = requests.post(url, data=json.dumps(data, sort_keys=True),
headers={'Content-Type': 'application/json'})
req.raise_for_status()
logger.debug('The results were successfully pushed to DB.')
return True
except Exception:
logger.exception('The results cannot be pushed to DB.')
return False
def get_mount_list(project_cfg):
mount_list = []
mounts = get_value_from_dict('mounts', project_cfg)
for mount in mounts:
if mount:
param_dict = {}
for param in mount.split(','):
key_word = param.split('=')
if len(key_word) != 2:
return None, 'Error mount {}.'.format(mount)
param_dict[key_word[0]] = key_word[1]
try:
mount_list.append(Mount(target=param_dict['target'],
source=param_dict['source'],
type='bind'))
except Exception as e:
return None, e
    return mount_list, 'Successfully got the mount list.'
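# Illustrative note (added; the paths below are made-up examples): each entry
# under the project's 'mounts' key is expected to look like
#   "source=/home/dovetail/results,target=/home/results"
# and becomes a bind-type docker Mount; a malformed entry makes the function
# return (None, error_message) instead.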
| true | true |
1c4acc2f700ed5667eb91162f430658b3774cfda | 4,744 | py | Python | webassign_grades_to_brightspace/webassign_grades_to_brightspace.py | christopherphan/brightspace-scripts | 6cfc588a9f0f3208f73fd5a893d0197d3dfeab50 | [
"MIT"
] | 1 | 2020-09-17T19:48:10.000Z | 2020-09-17T19:48:10.000Z | webassign_grades_to_brightspace/webassign_grades_to_brightspace.py | christopherphan/brightspace-scripts | 6cfc588a9f0f3208f73fd5a893d0197d3dfeab50 | [
"MIT"
] | null | null | null | webassign_grades_to_brightspace/webassign_grades_to_brightspace.py | christopherphan/brightspace-scripts | 6cfc588a9f0f3208f73fd5a893d0197d3dfeab50 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# webassign_grades_to_brightspace.py
#
# Christopher L. Phan, Ph.D.
# [email protected]
# Last updated: 2019-05-23
#
#################################
# Copyright (c) 2019 Christopher L. Phan
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
help_string = """
webassign_grades_to_brightspace.py
Copyright (c) 2019 Christopher L. Phan
See README.md or source code for important license and disclaimer of warranty
notice.
Converts a WebAssign grade export file to a format that can be imported under D2L Brightspace.
For this script to work, you need 2 files:
First is a CSV file exported from the D2L Brightspace gradebook feature using with the following
options:
* Key field: "Both"
* User details: All three checked (last name, first name, and email)
* A "WebAssignUsername" field with their WebAssign Username
Second is the WebAssign grade export, exported in "tsv" format.
syntax: ./webassign_grades_to_brightspace.py d2l-input-file WebAssign-input-file D2L-output-file
"""
import csv
import sys
if len(sys.argv) != 4:
print(help_string)
sys.exit()
infilename_d2l = sys.argv[1]
infilename_wa = sys.argv[2]
outfilename = sys.argv[3]
# Step 1: Read the list of students from the D2L download
studentlist = dict()
with open(infilename_d2l, 'rt') as infile:
studentreader = csv.reader(infile, delimiter=',')
for (index, row) in enumerate(studentreader):
if index == 0:
wa_un_idx = row.index("WebAssignUsername Text Grade <Text>")
else:
studentlist[row[wa_un_idx].strip()] = (row[0].strip(), row[1].strip())
# Step 2: Scrape out the scores
setnames = []
with open(infilename_wa, 'rt') as infile:
wareader = csv.reader(infile, delimiter='\t')
studentpart = False
for row in wareader:
if studentpart:
curstudent = row[1].strip()
if (studentpart and curstudent in studentlist):
output.append([studentlist[curstudent][0], studentlist[curstudent][1]])
for column in setnames:
curscore = row[column[1]].strip()
if (curscore == "NS" or curscore == "ND" or curscore == ""):
curscore = "0"
output[-1].append(curscore)
output[-1].append("#")
if (row != [] and not studentpart):
if (row[0].strip() == "Assignment Name"):
# Read off the set names and store in "setnames"
for (idx2, column) in enumerate(row):
if (column.strip() !="" and column.strip() != "Total" and
column.strip() != "Assignment Name"):
assign_name = column.strip()
assign_name = assign_name.replace(",", " ")
setnames.append([assign_name, idx2])
elif (row[0].strip() == "Totals"):
# Read off the value names and put in the set names
for column in setnames:
column.append(row[column[1]].strip())
elif (row[0].strip() == "Fullname"):
# We are ready to read the scores, set up the output
output = [['OrgDefinedId', 'Username']]
for column in setnames:
output[0].append(column[0] + " Points Grade <Numeric MaxPoints:" + column[2] + ">")
output[0].append("End-of-Line Indicator")
studentpart = True
# Step 3: Output
with open(outfilename, 'wt') as outfile:
for row in output:
outline = ""
for column in row:
outline += column
if (column != "#" and column != "End-of-Line Indicator"):
outline +=","
outfile.write(outline + "\n")
| 37.0625 | 103 | 0.634907 |
help_string = """
webassign_grades_to_brightspace.py
Copyright (c) 2019 Christopher L. Phan
See README.md or source code for important license and disclaimer of warranty
notice.
Converts a WebAssign grade export file to a format that can be imported under D2L Brightspace.
For this script to work, you need 2 files:
First is a CSV file exported from the D2L Brightspace gradebook feature using with the following
options:
* Key field: "Both"
* User details: All three checked (last name, first name, and email)
* A "WebAssignUsername" field with their WebAssign Username
Second is the WebAssign grade export, exported in "tsv" format.
syntax: ./webassign_grades_to_brightspace.py d2l-input-file WebAssign-input-file D2L-output-file
"""
import csv
import sys
if len(sys.argv) != 4:
print(help_string)
sys.exit()
infilename_d2l = sys.argv[1]
infilename_wa = sys.argv[2]
outfilename = sys.argv[3]
studentlist = dict()
with open(infilename_d2l, 'rt') as infile:
studentreader = csv.reader(infile, delimiter=',')
for (index, row) in enumerate(studentreader):
if index == 0:
wa_un_idx = row.index("WebAssignUsername Text Grade <Text>")
else:
studentlist[row[wa_un_idx].strip()] = (row[0].strip(), row[1].strip())
setnames = []
with open(infilename_wa, 'rt') as infile:
wareader = csv.reader(infile, delimiter='\t')
studentpart = False
for row in wareader:
if studentpart:
curstudent = row[1].strip()
if (studentpart and curstudent in studentlist):
output.append([studentlist[curstudent][0], studentlist[curstudent][1]])
for column in setnames:
curscore = row[column[1]].strip()
if (curscore == "NS" or curscore == "ND" or curscore == ""):
curscore = "0"
output[-1].append(curscore)
output[-1].append("#")
if (row != [] and not studentpart):
if (row[0].strip() == "Assignment Name"):
for (idx2, column) in enumerate(row):
if (column.strip() !="" and column.strip() != "Total" and
column.strip() != "Assignment Name"):
assign_name = column.strip()
assign_name = assign_name.replace(",", " ")
setnames.append([assign_name, idx2])
elif (row[0].strip() == "Totals"):
for column in setnames:
column.append(row[column[1]].strip())
elif (row[0].strip() == "Fullname"):
output = [['OrgDefinedId', 'Username']]
for column in setnames:
output[0].append(column[0] + " Points Grade <Numeric MaxPoints:" + column[2] + ">")
output[0].append("End-of-Line Indicator")
studentpart = True
with open(outfilename, 'wt') as outfile:
for row in output:
outline = ""
for column in row:
outline += column
if (column != "#" and column != "End-of-Line Indicator"):
outline +=","
outfile.write(outline + "\n")
| true | true |
1c4acc4771c6410e1e1685fee0d975f4b24e233e | 370 | py | Python | output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.any_uri.schema_instance.nistschema_sv_iv_atomic_any_uri_enumeration_5_xsd.nistschema_sv_iv_atomic_any_uri_enumeration_5 import (
NistschemaSvIvAtomicAnyUriEnumeration5,
NistschemaSvIvAtomicAnyUriEnumeration5Type,
)
__all__ = [
"NistschemaSvIvAtomicAnyUriEnumeration5",
"NistschemaSvIvAtomicAnyUriEnumeration5Type",
]
| 37 | 164 | 0.864865 | from output.models.nist_data.atomic.any_uri.schema_instance.nistschema_sv_iv_atomic_any_uri_enumeration_5_xsd.nistschema_sv_iv_atomic_any_uri_enumeration_5 import (
NistschemaSvIvAtomicAnyUriEnumeration5,
NistschemaSvIvAtomicAnyUriEnumeration5Type,
)
__all__ = [
"NistschemaSvIvAtomicAnyUriEnumeration5",
"NistschemaSvIvAtomicAnyUriEnumeration5Type",
]
| true | true |
1c4acc8d126515aa4dd30b64e9be465fd5ab13b8 | 2,237 | py | Python | examples/github/features/test_github_search.py | Z-Brueske/screenpy | 1c852a49eb3821727662458fd707b9bcf48bb8cf | [
"MIT"
] | null | null | null | examples/github/features/test_github_search.py | Z-Brueske/screenpy | 1c852a49eb3821727662458fd707b9bcf48bb8cf | [
"MIT"
] | null | null | null | examples/github/features/test_github_search.py | Z-Brueske/screenpy | 1c852a49eb3821727662458fd707b9bcf48bb8cf | [
"MIT"
] | null | null | null | """
An example of a test module that follows the typical pytest test
structure. These tests show off how to use custom tasks and questions,
though they are a little bit contrived.
"""
from typing import Generator
import pytest
from screenpy import Actor, AnActor, given, then, when
from screenpy.abilities import BrowseTheWeb
from screenpy.actions import Open
from screenpy.pacing import act, scene
from screenpy.resolutions import (
ContainsTheText,
ContainTheText,
DoesNot,
IsEqualTo,
ReadsExactly,
)
from selenium.webdriver import Firefox
from ..questions.number_of_search_results import NumberOfSearchResults
from ..questions.search_results_message import SearchResultsMessage
from ..tasks.search_github import SearchGitHub
from ..user_interface.github_home_page import URL
@pytest.fixture(scope="function", name="Perry")
def fixture_actor() -> Generator:
"""Create the actor for our example tests!"""
the_actor = Actor.named("Perry").who_can(BrowseTheWeb.using(Firefox()))
yield the_actor
the_actor.exit_stage_left()
@act("Search")
@scene("Search for the ScreenPy repository on GitHub")
def test_search_for_screenpy(Perry: AnActor) -> None:
"""GitHub search finds the screenpy repository."""
given(Perry).was_able_to(Open.their_browser_on(URL))
when(Perry).attempts_to(SearchGitHub.for_text("perrygoy/screenpy"))
then(Perry).should_see_that(
(SearchResultsMessage(), DoesNot(ContainTheText("couldn’t"))),
(SearchResultsMessage(), ReadsExactly("1 repository result")),
(NumberOfSearchResults(), IsEqualTo(1)),
)
@act("Search")
@scene("Search for a nonexistant repository on GitHub")
def test_search_for_nonexistent_repo(Perry: AnActor) -> None:
"""GitHub search fails to find a nonexistant repository."""
nonexistant_repository = "perrygoy/i-never-made-this-repo"
given(Perry).was_able_to(Open.their_browser_on(URL))
when(Perry).attempts_to(SearchGitHub.for_text(nonexistant_repository))
then(Perry).should_see_that(
(SearchResultsMessage(), ContainsTheText("We couldn’t find any")),
(SearchResultsMessage(), ContainsTheText(nonexistant_repository)),
(NumberOfSearchResults(), IsEqualTo(0)),
)
| 34.953125 | 75 | 0.753688 |
from typing import Generator
import pytest
from screenpy import Actor, AnActor, given, then, when
from screenpy.abilities import BrowseTheWeb
from screenpy.actions import Open
from screenpy.pacing import act, scene
from screenpy.resolutions import (
ContainsTheText,
ContainTheText,
DoesNot,
IsEqualTo,
ReadsExactly,
)
from selenium.webdriver import Firefox
from ..questions.number_of_search_results import NumberOfSearchResults
from ..questions.search_results_message import SearchResultsMessage
from ..tasks.search_github import SearchGitHub
from ..user_interface.github_home_page import URL
@pytest.fixture(scope="function", name="Perry")
def fixture_actor() -> Generator:
the_actor = Actor.named("Perry").who_can(BrowseTheWeb.using(Firefox()))
yield the_actor
the_actor.exit_stage_left()
@act("Search")
@scene("Search for the ScreenPy repository on GitHub")
def test_search_for_screenpy(Perry: AnActor) -> None:
given(Perry).was_able_to(Open.their_browser_on(URL))
when(Perry).attempts_to(SearchGitHub.for_text("perrygoy/screenpy"))
then(Perry).should_see_that(
(SearchResultsMessage(), DoesNot(ContainTheText("couldn’t"))),
(SearchResultsMessage(), ReadsExactly("1 repository result")),
(NumberOfSearchResults(), IsEqualTo(1)),
)
@act("Search")
@scene("Search for a nonexistant repository on GitHub")
def test_search_for_nonexistent_repo(Perry: AnActor) -> None:
nonexistant_repository = "perrygoy/i-never-made-this-repo"
given(Perry).was_able_to(Open.their_browser_on(URL))
when(Perry).attempts_to(SearchGitHub.for_text(nonexistant_repository))
then(Perry).should_see_that(
(SearchResultsMessage(), ContainsTheText("We couldn’t find any")),
(SearchResultsMessage(), ContainsTheText(nonexistant_repository)),
(NumberOfSearchResults(), IsEqualTo(0)),
)
| true | true |
1c4ace0ee97cb02a09668b10170665e6f09dee51 | 618 | py | Python | examples/animations/recursive_composite.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 142 | 2020-06-12T17:01:58.000Z | 2022-03-16T23:21:37.000Z | examples/animations/recursive_composite.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 35 | 2020-04-15T15:34:54.000Z | 2022-03-19T20:26:47.000Z | examples/animations/recursive_composite.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 14 | 2020-06-23T18:56:46.000Z | 2022-03-31T15:54:56.000Z | from coldtype import *
from coldtype.fx.skia import fill, phototype
@animation((1080, 1080), timeline=90, composites=1)
def recursive_composite(f):
return (DPS([
f.last_render(lambda p: p
.translate(1, -2)
.scale(0.997)
.ch(fill(1))),
(DP(Rect(200, 200))
.align(f.a.r.inset(100, 100), "mnx", "mxy")
.rotate(f.e("eeio")*-360)
.translate(f.a.r.w*0.6*f.e("ceio", 1), 0)
.f(0).s(1).sw(10) # invert for phototype
)])
.ch(phototype(f.a.r,
fill=hsl(0.90, 0.8), blur=3, cut=133, cutw=30))) | 34.333333 | 60 | 0.511327 | from coldtype import *
from coldtype.fx.skia import fill, phototype
@animation((1080, 1080), timeline=90, composites=1)
def recursive_composite(f):
return (DPS([
f.last_render(lambda p: p
.translate(1, -2)
.scale(0.997)
.ch(fill(1))),
(DP(Rect(200, 200))
.align(f.a.r.inset(100, 100), "mnx", "mxy")
.rotate(f.e("eeio")*-360)
.translate(f.a.r.w*0.6*f.e("ceio", 1), 0)
.f(0).s(1).sw(10) )])
.ch(phototype(f.a.r,
fill=hsl(0.90, 0.8), blur=3, cut=133, cutw=30))) | true | true |
1c4aceb4e5772df987ce568ec6025ed09d9c93df | 134 | py | Python | src/softfab/docs/reference/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 20 | 2019-02-07T17:03:04.000Z | 2020-03-16T20:45:19.000Z | src/softfab/docs/reference/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 36 | 2019-02-11T08:57:16.000Z | 2020-09-29T05:32:08.000Z | src/softfab/docs/reference/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
button = 'Reference'
children = ('api', 'cmdline', 'releases', 'wrappers')
icon = 'IconDocs'
| 22.333333 | 53 | 0.686567 |
button = 'Reference'
children = ('api', 'cmdline', 'releases', 'wrappers')
icon = 'IconDocs'
| true | true |
1c4acfb5c08ee98c29bfec3501371498e2d86cb2 | 4,508 | py | Python | eggs/Paste-1.7.5.1-py2.7.egg/paste/cascade.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | 19 | 2015-05-01T19:59:03.000Z | 2021-12-09T08:03:16.000Z | eggs/Paste-1.7.5.1-py2.7.egg/paste/cascade.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | eggs/Paste-1.7.5.1-py2.7.egg/paste/cascade.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Cascades through several applications, so long as applications
return ``404 Not Found``.
"""
from paste import httpexceptions
from paste.util import converters
import tempfile
from cStringIO import StringIO
__all__ = ['Cascade']
def make_cascade(loader, global_conf, catch='404', **local_conf):
"""
Entry point for Paste Deploy configuration
Expects configuration like::
[composit:cascade]
use = egg:Paste#cascade
# all start with 'app' and are sorted alphabetically
app1 = foo
app2 = bar
...
catch = 404 500 ...
"""
catch = map(int, converters.aslist(catch))
apps = []
for name, value in local_conf.items():
if not name.startswith('app'):
raise ValueError(
"Bad configuration key %r (=%r); all configuration keys "
"must start with 'app'"
% (name, value))
app = loader.get_app(value, global_conf=global_conf)
apps.append((name, app))
apps.sort()
apps = [app for name, app in apps]
return Cascade(apps, catch=catch)
class Cascade(object):
"""
Passed a list of applications, ``Cascade`` will try each of them
in turn. If one returns a status code listed in ``catch`` (by
default just ``404 Not Found``) then the next application is
tried.
If all applications fail, then the last application's failure
response is used.
Instances of this class are WSGI applications.
"""
def __init__(self, applications, catch=(404,)):
self.apps = applications
self.catch_codes = {}
self.catch_exceptions = []
for error in catch:
if isinstance(error, str):
error = int(error.split(None, 1)[0])
if isinstance(error, httpexceptions.HTTPException):
exc = error
code = error.code
else:
exc = httpexceptions.get_exception(error)
code = error
self.catch_codes[code] = exc
self.catch_exceptions.append(exc)
self.catch_exceptions = tuple(self.catch_exceptions)
def __call__(self, environ, start_response):
"""
WSGI application interface
"""
failed = []
def repl_start_response(status, headers, exc_info=None):
code = int(status.split(None, 1)[0])
if code in self.catch_codes:
failed.append(None)
return _consuming_writer
return start_response(status, headers, exc_info)
try:
length = int(environ.get('CONTENT_LENGTH', 0) or 0)
except ValueError:
length = 0
if length > 0:
# We have to copy wsgi.input
copy_wsgi_input = True
if length > 4096 or length < 0:
f = tempfile.TemporaryFile()
if length < 0:
f.write(environ['wsgi.input'].read())
else:
copy_len = length
while copy_len > 0:
chunk = environ['wsgi.input'].read(min(copy_len, 4096))
if not chunk:
raise IOError("Request body truncated")
f.write(chunk)
copy_len -= len(chunk)
f.seek(0)
else:
f = StringIO(environ['wsgi.input'].read(length))
environ['wsgi.input'] = f
else:
copy_wsgi_input = False
for app in self.apps[:-1]:
environ_copy = environ.copy()
if copy_wsgi_input:
environ_copy['wsgi.input'].seek(0)
failed = []
try:
v = app(environ_copy, repl_start_response)
if not failed:
return v
else:
if hasattr(v, 'close'):
# Exhaust the iterator first:
list(v)
# then close:
v.close()
except self.catch_exceptions, e:
pass
if copy_wsgi_input:
environ['wsgi.input'].seek(0)
return self.apps[-1](environ, start_response)
def _consuming_writer(s):
pass
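# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original Paste module). The two
# toy WSGI apps below are invented for illustration only: a request that the
# first app answers with 404 falls through to the second.
if __name__ == '__main__':
    def maybe_missing(environ, start_response):
        start_response('404 Not Found', [('Content-type', 'text/plain')])
        return ['not here']

    def fallback(environ, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ['hello from the fallback app']

    cascading_app = Cascade([maybe_missing, fallback], catch=(404,))
    # Exercise the cascade once with a minimal WSGI environ; the 404 from
    # maybe_missing is swallowed and fallback's body is returned.
    body = cascading_app({'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0'},
                         lambda status, headers, exc_info=None: None)
    print(body)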
| 33.641791 | 84 | 0.538598 |
"""
Cascades through several applications, so long as applications
return ``404 Not Found``.
"""
from paste import httpexceptions
from paste.util import converters
import tempfile
from cStringIO import StringIO
__all__ = ['Cascade']
def make_cascade(loader, global_conf, catch='404', **local_conf):
"""
Entry point for Paste Deploy configuration
Expects configuration like::
[composit:cascade]
use = egg:Paste#cascade
# all start with 'app' and are sorted alphabetically
app1 = foo
app2 = bar
...
catch = 404 500 ...
"""
catch = map(int, converters.aslist(catch))
apps = []
for name, value in local_conf.items():
if not name.startswith('app'):
raise ValueError(
"Bad configuration key %r (=%r); all configuration keys "
"must start with 'app'"
% (name, value))
app = loader.get_app(value, global_conf=global_conf)
apps.append((name, app))
apps.sort()
apps = [app for name, app in apps]
return Cascade(apps, catch=catch)
class Cascade(object):
"""
Passed a list of applications, ``Cascade`` will try each of them
in turn. If one returns a status code listed in ``catch`` (by
default just ``404 Not Found``) then the next application is
tried.
If all applications fail, then the last application's failure
response is used.
Instances of this class are WSGI applications.
"""
def __init__(self, applications, catch=(404,)):
self.apps = applications
self.catch_codes = {}
self.catch_exceptions = []
for error in catch:
if isinstance(error, str):
error = int(error.split(None, 1)[0])
if isinstance(error, httpexceptions.HTTPException):
exc = error
code = error.code
else:
exc = httpexceptions.get_exception(error)
code = error
self.catch_codes[code] = exc
self.catch_exceptions.append(exc)
self.catch_exceptions = tuple(self.catch_exceptions)
def __call__(self, environ, start_response):
"""
WSGI application interface
"""
failed = []
def repl_start_response(status, headers, exc_info=None):
code = int(status.split(None, 1)[0])
if code in self.catch_codes:
failed.append(None)
return _consuming_writer
return start_response(status, headers, exc_info)
try:
length = int(environ.get('CONTENT_LENGTH', 0) or 0)
except ValueError:
length = 0
if length > 0:
# We have to copy wsgi.input
copy_wsgi_input = True
if length > 4096 or length < 0:
f = tempfile.TemporaryFile()
if length < 0:
f.write(environ['wsgi.input'].read())
else:
copy_len = length
while copy_len > 0:
chunk = environ['wsgi.input'].read(min(copy_len, 4096))
if not chunk:
raise IOError("Request body truncated")
f.write(chunk)
copy_len -= len(chunk)
f.seek(0)
else:
f = StringIO(environ['wsgi.input'].read(length))
environ['wsgi.input'] = f
else:
copy_wsgi_input = False
for app in self.apps[:-1]:
environ_copy = environ.copy()
if copy_wsgi_input:
environ_copy['wsgi.input'].seek(0)
failed = []
try:
v = app(environ_copy, repl_start_response)
if not failed:
return v
else:
if hasattr(v, 'close'):
# Exhaust the iterator first:
list(v)
# then close:
v.close()
except self.catch_exceptions, e:
pass
if copy_wsgi_input:
environ['wsgi.input'].seek(0)
return self.apps[-1](environ, start_response)
def _consuming_writer(s):
pass
| false | true |
1c4acfe937c7a3c496387cf35b341673a39ff740 | 2,239 | py | Python | compressible_sr/problems/rt.py | zooechiu/pyro2 | 51874476e9c3c3c412c66850ab819ca70af0b20c | [
"BSD-3-Clause"
] | 151 | 2018-08-14T12:52:22.000Z | 2022-03-29T07:57:01.000Z | compressible_sr/problems/rt.py | gfjykldd/pyro2 | b0ca4aa7b1b0f0d445c6a8d0ab63fcc0bc8a431c | [
"BSD-3-Clause"
] | 40 | 2015-03-25T15:45:44.000Z | 2018-07-30T18:48:47.000Z | compressible_sr/problems/rt.py | gfjykldd/pyro2 | b0ca4aa7b1b0f0d445c6a8d0ab63fcc0bc8a431c | [
"BSD-3-Clause"
] | 56 | 2018-10-10T16:54:59.000Z | 2022-02-06T08:48:52.000Z | from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
import compressible_sr.eos as eos
from util import msg
def init_data(my_data, rp):
""" initialize the rt problem """
msg.bold("initializing the rt problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in rt.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
dens1 = rp.get_param("rt.dens1")
dens2 = rp.get_param("rt.dens2")
p0 = rp.get_param("rt.p0")
amp = rp.get_param("rt.amp")
sigma = rp.get_param("rt.sigma")
    # initialize the conserved variables; in this special-relativistic solver
    # they are D = rho*W, (Sx, Sy) = rho*h*W**2*(u, v), and
    # tau = rho*h*W**2 - p - D, with W the Lorentz factor and h the specific
    # enthalpy (the primitive state is set up first and converted below)
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = 0.0
# set the density to be stratified in the y-direction
myg = my_data.grid
ycenter = 0.5*(myg.ymin + myg.ymax)
p = myg.scratch_array()
p[:, :] = p0
dens[:, :] = dens1
for j in range(myg.jlo, myg.jhi+1):
if (myg.y[j] < ycenter):
dens[:, j] = dens1
p[:, j] = p0 + dens1*grav*myg.y[j]
else:
dens[:, j] = dens2
p[:, j] = p0 + dens1*grav*ycenter + dens2*grav*(myg.y[j] - ycenter)
ymom[:, :] = amp*np.cos(2.0*np.pi*myg.x2d/(myg.xmax-myg.xmin))*np.exp(-(myg.y2d-ycenter)**2/sigma**2)
rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
u = xmom
v = ymom
W = 1./np.sqrt(1-u**2-v**2)
dens[:, :] *= W
xmom[:, :] *= rhoh[:, :]*W**2
ymom[:, :] *= rhoh[:, :]*W**2
ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]
# set the energy (P = cs2*dens)
# ener[:, :] = p[:, :]/(gamma - 1.0) + \
# 0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
def finalize():
""" print out any information to the user at the end of the run """
pass
| 26.341176 | 105 | 0.564538 | from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
import compressible_sr.eos as eos
from util import msg
def init_data(my_data, rp):
msg.bold("initializing the rt problem...")
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in rt.py")
print(my_data.__class__)
sys.exit()
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
dens1 = rp.get_param("rt.dens1")
dens2 = rp.get_param("rt.dens2")
p0 = rp.get_param("rt.p0")
amp = rp.get_param("rt.amp")
sigma = rp.get_param("rt.sigma")
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = 0.0
myg = my_data.grid
ycenter = 0.5*(myg.ymin + myg.ymax)
p = myg.scratch_array()
p[:, :] = p0
dens[:, :] = dens1
for j in range(myg.jlo, myg.jhi+1):
if (myg.y[j] < ycenter):
dens[:, j] = dens1
p[:, j] = p0 + dens1*grav*myg.y[j]
else:
dens[:, j] = dens2
p[:, j] = p0 + dens1*grav*ycenter + dens2*grav*(myg.y[j] - ycenter)
ymom[:, :] = amp*np.cos(2.0*np.pi*myg.x2d/(myg.xmax-myg.xmin))*np.exp(-(myg.y2d-ycenter)**2/sigma**2)
rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
u = xmom
v = ymom
W = 1./np.sqrt(1-u**2-v**2)
dens[:, :] *= W
xmom[:, :] *= rhoh[:, :]*W**2
ymom[:, :] *= rhoh[:, :]*W**2
ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]
def finalize():
pass
| true | true |
1c4ad12ed636214868ae564499c2f27c7c5eb010 | 4,523 | py | Python | devresources/settings.py | Sachin-chaurasiya/devresources | a1aaee1d62b755483ff533bb07ed344a1a155a1f | [
"MIT"
] | null | null | null | devresources/settings.py | Sachin-chaurasiya/devresources | a1aaee1d62b755483ff533bb07ed344a1a155a1f | [
"MIT"
] | null | null | null | devresources/settings.py | Sachin-chaurasiya/devresources | a1aaee1d62b755483ff533bb07ed344a1a155a1f | [
"MIT"
] | null | null | null | """
Django settings for devresources project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import dotenv
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", "False").lower() in ("true", "1", "yes")
ALLOWED_HOSTS = ["localhost", "127.0.0.1", "devresources.guru"]
# Application definition
INSTALLED_APPS = [
"jazzmin",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
# 3rd party apps
"corsheaders",
"hitcount",
# local apps
"core.apps.CoreConfig",
"accounts.apps.AccountsConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"crum.CurrentRequestUserMiddleware",
]
ROOT_URLCONF = "devresources.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR / "templates",
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "devresources.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-in"
TIME_ZONE = "Asia/Calcutta"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "static"
STATICFILES_DIRS = [
BASE_DIR / "static_files",
]
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
AUTH_USER_MODEL = "accounts.User"
LOGIN_URL = "signin_view"
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Imports below are intentionally placed here to override the existing values. e.g: DEBUG
try:
from devresources.local_settings import *
except ImportError as e:
pass
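# Hedged example (an assumption, not shipped with the project): a development
# devresources/local_settings.py might simply contain, e.g.
#   DEBUG = True
#   ALLOWED_HOSTS = ["*"]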
if DEBUG:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
else:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.environ["DB_NAME"],
"USER": os.environ["DB_USER"],
"PORT": os.environ["DB_PORT"],
"HOST": os.environ["DB_HOST"],
"PASSWORD": os.environ["DB_PASSWORD"],
}
}
from devresources.jazzmin_config import *
from devresources.hitcount_config import *
from devresources.ratelimit_config import *
| 25.994253 | 91 | 0.683617 | import os
import dotenv
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
SECRET_KEY = os.environ["SECRET_KEY"]
DEBUG = os.environ.get("DEBUG", False)
ALLOWED_HOSTS = ["localhost", "127.0.0.1", "devresources.guru"]
# Application definition
INSTALLED_APPS = [
"jazzmin",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
# 3rd party apps
"corsheaders",
"hitcount",
# local apps
"core.apps.CoreConfig",
"accounts.apps.AccountsConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"crum.CurrentRequestUserMiddleware",
]
ROOT_URLCONF = "devresources.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR / "templates",
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "devresources.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-in"
TIME_ZONE = "Asia/Calcutta"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "static"
STATICFILES_DIRS = [
BASE_DIR / "static_files",
]
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
AUTH_USER_MODEL = "accounts.User"
LOGIN_URL = "signin_view"
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Imports below are intentionally placed here to override the existing values. e.g: DEBUG
try:
from devresources.local_settings import *
except ImportError as e:
pass
if DEBUG:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
else:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.environ["DB_NAME"],
"USER": os.environ["DB_USER"],
"PORT": os.environ["DB_PORT"],
"HOST": os.environ["DB_HOST"],
"PASSWORD": os.environ["DB_PASSWORD"],
}
}
from devresources.jazzmin_config import *
from devresources.hitcount_config import *
from devresources.ratelimit_config import *
| true | true |
1c4ad283daf0722e256ef228e2048c6cc80b222e | 1,485 | py | Python | new_package/seed/rename_sync_file.py | ziyixi/SeisScripts | a484bc1747eae52b2441f0bfd47ac7e093150f1d | [
"MIT"
] | null | null | null | new_package/seed/rename_sync_file.py | ziyixi/SeisScripts | a484bc1747eae52b2441f0bfd47ac7e093150f1d | [
"MIT"
] | null | null | null | new_package/seed/rename_sync_file.py | ziyixi/SeisScripts | a484bc1747eae52b2441f0bfd47ac7e093150f1d | [
"MIT"
] | null | null | null | """
Rename generated sync ASDF files based on the CMT solution (relocation) files: the
event-name token in each filename is replaced with the origin resource id read from
the corresponding CMTSOLUTION file.
"""
import subprocess
import click
import obspy
from os.path import join
from glob import glob
def rename_single(mapper, filepath):
# filename = filepath.split("/")[-1]
# filename_new = mapper[filename]
# filepath_new = join(".".join(filename[:-1]), filename_new)
filename = filepath.split("/")[-1]
key = filename.split(".")[0].split("_")[1]
key_new = mapper[key]
all_split = filename.split(".")[0].split("_")
all_split[1] = key_new
filename_new = "_".join(all_split)+".h5"
filepath_new = join("/".join(filepath.split("/")[:-1]), filename_new)
subprocess.call(f"mv {filepath} {filepath_new}", shell=True)
def get_mapper(cmts_dir):
event_path = glob(join(cmts_dir, "*"))
event_name = [item.split("/")[-1] for item in event_path]
result = {}
for path, name in zip(event_path, event_name):
event = obspy.read_events(path)[0]
id = event.origins[0].resource_id.id.split("/")[-2]
result[name] = id
return result
@click.command()
@click.option('--cmts_dir', required=True, type=str, help="the cmt directory")
@click.option('--files_dir', required=True, type=str, help="the asdf files directory")
def main(cmts_dir, files_dir):
all_files = glob(join(files_dir, "*"))
mapper = get_mapper(cmts_dir)
for filepath in all_files:
rename_single(mapper, filepath)
if __name__ == "__main__":
main()
| 30.306122 | 86 | 0.657239 | import subprocess
import click
import obspy
from os.path import join
from glob import glob
def rename_single(mapper, filepath):
filename = filepath.split("/")[-1]
key = filename.split(".")[0].split("_")[1]
key_new = mapper[key]
all_split = filename.split(".")[0].split("_")
all_split[1] = key_new
filename_new = "_".join(all_split)+".h5"
filepath_new = join("/".join(filepath.split("/")[:-1]), filename_new)
subprocess.call(f"mv {filepath} {filepath_new}", shell=True)
def get_mapper(cmts_dir):
event_path = glob(join(cmts_dir, "*"))
event_name = [item.split("/")[-1] for item in event_path]
result = {}
for path, name in zip(event_path, event_name):
event = obspy.read_events(path)[0]
id = event.origins[0].resource_id.id.split("/")[-2]
result[name] = id
return result
@click.command()
@click.option('--cmts_dir', required=True, type=str, help="the cmt directory")
@click.option('--files_dir', required=True, type=str, help="the asdf files directory")
def main(cmts_dir, files_dir):
all_files = glob(join(files_dir, "*"))
mapper = get_mapper(cmts_dir)
for filepath in all_files:
rename_single(mapper, filepath)
if __name__ == "__main__":
main()
| true | true |
1c4ad3f0fe7fc064e5ce1cba97d6cff0cee2f20b | 10,251 | py | Python | handlers/song.py | Bluehatcoders/Telegram_Vc_Bot | ba9605084275d0a6ec266407e54df5a4cacc5e86 | [
"Apache-2.0"
] | null | null | null | handlers/song.py | Bluehatcoders/Telegram_Vc_Bot | ba9605084275d0a6ec266407e54df5a4cacc5e86 | [
"Apache-2.0"
] | null | null | null | handlers/song.py | Bluehatcoders/Telegram_Vc_Bot | ba9605084275d0a6ec266407e54df5a4cacc5e86 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import os
import requests
import aiohttp
import youtube_dl
import wget
import math
from pyrogram import filters, Client
from youtube_search import YoutubeSearch
from urllib.parse import urlparse
import aiofiles
import os
from random import randint
from youtubesearchpython import SearchVideos
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Chat, Message, User
import asyncio
from typing import Callable, Coroutine, Dict, List, Tuple, Union
import sys
import time
from helpers.errors import DurationLimitError
@Client.on_message(filters.command('song') & ~filters.channel)
def song(client, message):
user_id = message.from_user.id
user_name = message.from_user.first_name
rpk = "["+user_name+"](tg://user?id="+str(user_id)+")"
query = ''
for i in message.command[1:]:
query += ' ' + str(i)
print(query)
    m = message.reply('Hold on, searching for your song (hehe)')
ydl_opts = {"format": "bestaudio/best"}
try:
results = YoutubeSearch(query, max_results=1).to_dict()
link = f"https://youtube.com{results[0]['url_suffix']}"
#print(results)
title = results[0]["title"][:40]
thumbnail = results[0]["thumbnails"][0]
thumb_name = f'thumb{title}.jpg'
thumb = requests.get(thumbnail, allow_redirects=True)
open(thumb_name, 'wb').write(thumb.content)
duration = results[0]["duration"]
url_suffix = results[0]["url_suffix"]
views = results[0]["views"]
except Exception as e:
m.edit(
"Abe kaun si duniya ka gana diya hai be youtube devta ke pass bhi nhi mila"
)
print(str(e))
return
m.edit("Jadu mantar mai karu song ka hogya download shuru")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = '**🎵 Uploaded by ❦︎𝗗𝗮𝘆𝗻𝗮𝗺𝗶𝗰 𝖝 𝗠𝘂𝘀𝗶𝗰 𝗕𝗼𝘁 **'
secmul, dur, dur_arr = 1, 0, duration.split(':')
for i in range(len(dur_arr)-1, -1, -1):
dur += (int(dur_arr[i]) * secmul)
secmul *= 60
message.reply_audio(audio_file, caption=rep, thumb=thumb_name, parse_mode='md', title=title, duration=dur)
m.delete()
except Exception as e:
m.edit('❌ Error')
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
def get_text(message: Message) -> [None, str]:
text_to_return = message.text
if message.text is None:
return None
if " " in text_to_return:
try:
return message.text.split(None, 1)[1]
except IndexError:
return None
else:
return None
def humanbytes(size):
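    # Convert a raw byte count into a human readable string, e.g. 2048 -> "2.0 KiB".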
if not size:
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
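    # Edit the Telegram status message roughly every 10 seconds with a text
    # progress bar, the transferred/total size and an estimated time remaining.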
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
if elapsed_time == 0:
return
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "{0}{1} {2}%\n".format(
"".join(["█" for i in range(math.floor(percentage / 10))]),
"".join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
)
if file_name:
try:
await message.edit(
"{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
)
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
else:
try:
await message.edit("{}\n{}".format(type_of_ps, tmp))
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
def get_user(message: Message, text: str) -> [int, str, None]:
if text is None:
asplit = None
else:
asplit = text.split(" ", 1)
user_s = None
reason_ = None
if message.reply_to_message:
user_s = message.reply_to_message.from_user.id
reason_ = text if text else None
elif asplit is None:
return None, None
elif len(asplit[0]) > 0:
user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
if len(asplit) == 2:
reason_ = asplit[1]
return user_s, reason_
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
def time_formatter(milliseconds: int) -> str:
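    # Convert a duration in milliseconds into a human readable
    # "X day(s), Y hour(s), ..." string.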
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
+ ((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
)
return tmp[:-2]
ydl_opts = {
'format': 'bestaudio/best',
'writethumbnail': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}]
}
def get_file_extension_from_url(url):
url_path = urlparse(url).path
basename = os.path.basename(url_path)
return basename.split(".")[-1]
# Function to download a song from a direct URL and save it as a local .mp3 file
async def download_song(url):
song_name = f"{randint(6969, 6999)}.mp3"
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
f = await aiofiles.open(song_name, mode='wb')
await f.write(await resp.read())
await f.close()
return song_name
is_downloading = False
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(':'))))
@Client.on_message(filters.command(["vsong", "vid"]))
async def ytmusic(client,message: Message):
global is_downloading
if is_downloading:
await message.reply_text("Kitna majduri karwaoge abhi 1 song download kar rha hu bad me aana jao bhago yha se")
return
urlissed = get_text(message)
pablo = await client.send_message(
message.chat.id,
f"`Getting {urlissed} From Youtube Servers. Please Wait.`")
if not urlissed:
await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
return
search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
mi = search.result()
mio = mi["search_result"]
mo = mio[0]["link"]
thum = mio[0]["title"]
fridayz = mio[0]["id"]
thums = mio[0]["channel"]
kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
await asyncio.sleep(0.6)
url = mo
sedlyf = wget.download(kekme)
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
],
"outtmpl": "%(id)s.mp4",
"logtostderr": False,
"quiet": True,
}
try:
is_downloading = True
with youtube_dl.YoutubeDL(opts) as ytdl:
infoo = ytdl.extract_info(url, False)
duration = round(infoo["duration"] / 60)
if duration > 8:
await pablo.edit(
f"❌ Dekh bro 8 minute se jyada ki video/song mai play nhi karta ja bhag aur ye song {duration} minute(s) ka hai"
)
is_downloading = False
return
ytdl_data = ytdl.extract_info(url, download=True)
    except Exception as e:
        await pablo.edit(f"**Failed To Download** \n**Error :** `{str(e)}`")
        is_downloading = False
        return
c_time = time.time()
file_stark = f"{ytdl_data['id']}.mp4"
capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
await client.send_video(message.chat.id, video = open(file_stark, "rb"), duration = int(ytdl_data["duration"]), file_name = str(ytdl_data["title"]), thumb = sedlyf, caption = capy, supports_streaming = True , progress=progress, progress_args=(pablo, c_time, f'`Uploading {urlissed} Song From YouTube Music! Khusi manao re `', file_stark))
await pablo.delete()
is_downloading = False
for files in (sedlyf, file_stark):
if files and os.path.exists(files):
os.remove(files)
| 32.961415 | 342 | 0.586674 | from __future__ import unicode_literals
import os
import requests
import aiohttp
import youtube_dl
import wget
import math
from pyrogram import filters, Client
from youtube_search import YoutubeSearch
from urllib.parse import urlparse
import aiofiles
import os
from random import randint
from youtubesearchpython import SearchVideos
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Chat, Message, User
import asyncio
from typing import Callable, Coroutine, Dict, List, Tuple, Union
import sys
import time
from helpers.errors import DurationLimitError
@Client.on_message(filters.command('song') & ~filters.channel)
def song(client, message):
user_id = message.from_user.id
user_name = message.from_user.first_name
rpk = "["+user_name+"](tg://user?id="+str(user_id)+")"
query = ''
for i in message.command[1:]:
query += ' ' + str(i)
print(query)
m = message.reply('Rukja jadu se tere song ko dhudh rha hu (Hue hue)')
ydl_opts = {"format": "bestaudio/best"}
try:
results = YoutubeSearch(query, max_results=1).to_dict()
link = f"https://youtube.com{results[0]['url_suffix']}"
title = results[0]["title"][:40]
thumbnail = results[0]["thumbnails"][0]
thumb_name = f'thumb{title}.jpg'
thumb = requests.get(thumbnail, allow_redirects=True)
open(thumb_name, 'wb').write(thumb.content)
duration = results[0]["duration"]
url_suffix = results[0]["url_suffix"]
views = results[0]["views"]
except Exception as e:
m.edit(
"Abe kaun si duniya ka gana diya hai be youtube devta ke pass bhi nhi mila"
)
print(str(e))
return
m.edit("Jadu mantar mai karu song ka hogya download shuru")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = '**🎵 Uploaded by ❦︎𝗗𝗮𝘆𝗻𝗮𝗺𝗶𝗰 𝖝 𝗠𝘂𝘀𝗶𝗰 𝗕𝗼𝘁 **'
secmul, dur, dur_arr = 1, 0, duration.split(':')
for i in range(len(dur_arr)-1, -1, -1):
dur += (int(dur_arr[i]) * secmul)
secmul *= 60
message.reply_audio(audio_file, caption=rep, thumb=thumb_name, parse_mode='md', title=title, duration=dur)
m.delete()
except Exception as e:
m.edit('❌ Error')
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
def get_text(message: Message) -> [None, str]:
text_to_return = message.text
if message.text is None:
return None
if " " in text_to_return:
try:
return message.text.split(None, 1)[1]
except IndexError:
return None
else:
return None
def humanbytes(size):
if not size:
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
if elapsed_time == 0:
return
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "{0}{1} {2}%\n".format(
"".join(["█" for i in range(math.floor(percentage / 10))]),
"".join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
)
if file_name:
try:
await message.edit(
"{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
)
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
else:
try:
await message.edit("{}\n{}".format(type_of_ps, tmp))
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
def get_user(message: Message, text: str) -> [int, str, None]:
if text is None:
asplit = None
else:
asplit = text.split(" ", 1)
user_s = None
reason_ = None
if message.reply_to_message:
user_s = message.reply_to_message.from_user.id
reason_ = text if text else None
elif asplit is None:
return None, None
elif len(asplit[0]) > 0:
user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
if len(asplit) == 2:
reason_ = asplit[1]
return user_s, reason_
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
+ ((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
)
return tmp[:-2]
ydl_opts = {
'format': 'bestaudio/best',
'writethumbnail': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}]
}
def get_file_extension_from_url(url):
url_path = urlparse(url).path
basename = os.path.basename(url_path)
return basename.split(".")[-1]
async def download_song(url):
song_name = f"{randint(6969, 6999)}.mp3"
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
f = await aiofiles.open(song_name, mode='wb')
await f.write(await resp.read())
await f.close()
return song_name
is_downloading = False
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(':'))))
@Client.on_message(filters.command(["vsong", "vid"]))
async def ytmusic(client,message: Message):
global is_downloading
if is_downloading:
await message.reply_text("Kitna majduri karwaoge abhi 1 song download kar rha hu bad me aana jao bhago yha se")
return
urlissed = get_text(message)
pablo = await client.send_message(
message.chat.id,
f"`Getting {urlissed} From Youtube Servers. Please Wait.`")
if not urlissed:
await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
return
search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
mi = search.result()
mio = mi["search_result"]
mo = mio[0]["link"]
thum = mio[0]["title"]
fridayz = mio[0]["id"]
thums = mio[0]["channel"]
kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
await asyncio.sleep(0.6)
url = mo
sedlyf = wget.download(kekme)
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
],
"outtmpl": "%(id)s.mp4",
"logtostderr": False,
"quiet": True,
}
try:
is_downloading = True
with youtube_dl.YoutubeDL(opts) as ytdl:
infoo = ytdl.extract_info(url, False)
duration = round(infoo["duration"] / 60)
if duration > 8:
await pablo.edit(
f"❌ Dekh bro 8 minute se jyada ki video/song mai play nhi karta ja bhag aur ye song {duration} minute(s) ka hai"
)
is_downloading = False
return
ytdl_data = ytdl.extract_info(url, download=True)
except Exception as e:
is_downloading = False
return
c_time = time.time()
file_stark = f"{ytdl_data['id']}.mp4"
capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
await client.send_video(message.chat.id, video = open(file_stark, "rb"), duration = int(ytdl_data["duration"]), file_name = str(ytdl_data["title"]), thumb = sedlyf, caption = capy, supports_streaming = True , progress=progress, progress_args=(pablo, c_time, f'`Uploading {urlissed} Song From YouTube Music! Khusi manao re `', file_stark))
await pablo.delete()
is_downloading = False
for files in (sedlyf, file_stark):
if files and os.path.exists(files):
os.remove(files)
| true | true |
1c4ad4ad9b8231fd18565220e4311acfb7bcd546 | 2,634 | py | Python | visualisations/linear_regression.py | SaadChaouki/ml-eli5-cli5 | 625a69edadf4737e41c58193873cf8a54273d7f0 | [
"MIT"
] | 1 | 2021-05-04T19:53:12.000Z | 2021-05-04T19:53:12.000Z | visualisations/linear_regression.py | SaadChaouki/ml-eli5-cli5 | 625a69edadf4737e41c58193873cf8a54273d7f0 | [
"MIT"
] | null | null | null | visualisations/linear_regression.py | SaadChaouki/ml-eli5-cli5 | 625a69edadf4737e41c58193873cf8a54273d7f0 | [
"MIT"
] | null | null | null | from supervised.regression.linearRegression import LinearRegression
from visualisations.color_palette import two_colors
from deep_learning.loss import MSELoss
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_regression
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import argparse
matplotlib.use("TkAgg")
def update(i):
y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[i]))])
plt.title(f'Iteration: {i + 1} | MSE: {round(MSELoss()(y_train, model.error[i]), 2)}')
line.set_ydata(y_pred)
if __name__ == '__main__':
# Argument parsing.
parser = argparse.ArgumentParser(description='Visualise a custom Linear Regression model in training.')
parser.add_argument('--max_iter', type=int, help='Maximum number of iterations.', default=100)
parser.add_argument('--random_state', type=int, help='Random state for data generation.', default=42)
parser.add_argument('--n_samples', type=int, help='Number of data points.', default=500)
parser.add_argument('--test_size', type=float, help='Test set size.', default=.2)
parser.add_argument('--lr', type=float, help='Learning Rate.', default=.1)
args = parser.parse_args()
# Maximum iterations.
max_iterations = args.max_iter
# Generate regression data.
X, y = make_regression(n_features=1, n_samples=args.n_samples, n_informative=1, noise=30,
random_state=args.random_state, bias=500, tail_strength=1)
# Train - Test Split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=42)
# Model and Predictions
model = LinearRegression(learning_rate=args.lr, iterations=max_iterations)
model.fit(X_train, y_train)
# Plot
fig, ax = plt.subplots(figsize=(15, 6), dpi=80)
fig.suptitle('Linear Regression', fontsize=20)
# Plotting training and testing data.
ax.scatter(X_train, y_train, color=two_colors[0], label='Train Data')
ax.scatter(X_test, y_test, color=two_colors[1], label='Test Data')
# Plot first iteration line.
y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[0]))])
X_train_sorted = np.array(sorted(X_train))
line, = ax.plot(X_train_sorted, y_pred, color='black', linewidth=2, label="Prediction")
# Labels and legend
plt.xlabel('Feature')
plt.ylabel('Target')
plt.legend(loc='lower right')
# Animation
animation = FuncAnimation(fig, update, frames=max_iterations, interval=1, repeat=False)
# Show plot
plt.show()
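    # Optional alternative to plt.show(), assuming the pillow package is available:
    # animation.save('linear_regression.gif', writer='pillow')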
| 36.583333 | 107 | 0.714503 | from supervised.regression.linearRegression import LinearRegression
from visualisations.color_palette import two_colors
from deep_learning.loss import MSELoss
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_regression
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import argparse
matplotlib.use("TkAgg")
def update(i):
y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[i]))])
plt.title(f'Iteration: {i + 1} | MSE: {round(MSELoss()(y_train, model.error[i]), 2)}')
line.set_ydata(y_pred)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualise a custom Linear Regression model in training.')
parser.add_argument('--max_iter', type=int, help='Maximum number of iterations.', default=100)
parser.add_argument('--random_state', type=int, help='Random state for data generation.', default=42)
parser.add_argument('--n_samples', type=int, help='Number of data points.', default=500)
parser.add_argument('--test_size', type=float, help='Test set size.', default=.2)
parser.add_argument('--lr', type=float, help='Learning Rate.', default=.1)
args = parser.parse_args()
max_iterations = args.max_iter
X, y = make_regression(n_features=1, n_samples=args.n_samples, n_informative=1, noise=30,
random_state=args.random_state, bias=500, tail_strength=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=42)
model = LinearRegression(learning_rate=args.lr, iterations=max_iterations)
model.fit(X_train, y_train)
fig, ax = plt.subplots(figsize=(15, 6), dpi=80)
fig.suptitle('Linear Regression', fontsize=20)
ax.scatter(X_train, y_train, color=two_colors[0], label='Train Data')
ax.scatter(X_test, y_test, color=two_colors[1], label='Test Data')
y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[0]))])
X_train_sorted = np.array(sorted(X_train))
line, = ax.plot(X_train_sorted, y_pred, color='black', linewidth=2, label="Prediction")
plt.xlabel('Feature')
plt.ylabel('Target')
plt.legend(loc='lower right')
animation = FuncAnimation(fig, update, frames=max_iterations, interval=1, repeat=False)
plt.show()
| true | true |
1c4ad4b41b0454140375984125d9d65161765ca1 | 1,711 | py | Python | oeml-sdk/python/test/test_order_cancel_all_request.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | 1 | 2020-07-23T05:47:52.000Z | 2020-07-23T05:47:52.000Z | oeml-sdk/python/test/test_order_cancel_all_request.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | null | null | null | oeml-sdk/python/test/test_order_cancel_all_request.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
OEML - REST API
This section will provide necessary information about the `CoinAPI OEML REST API` protocol. This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> # noqa: E501
The version of the OpenAPI document: v1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.order_cancel_all_request import OrderCancelAllRequest # noqa: E501
from openapi_client.rest import ApiException
class TestOrderCancelAllRequest(unittest.TestCase):
"""OrderCancelAllRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test OrderCancelAllRequest
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.order_cancel_all_request.OrderCancelAllRequest() # noqa: E501
if include_optional :
return OrderCancelAllRequest(
exchange_id = 'KRAKEN'
)
else :
return OrderCancelAllRequest(
exchange_id = 'KRAKEN',
)
def testOrderCancelAllRequest(self):
"""Test OrderCancelAllRequest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 31.109091 | 261 | 0.687902 |
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.order_cancel_all_request import OrderCancelAllRequest from openapi_client.rest import ApiException
class TestOrderCancelAllRequest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
if include_optional :
return OrderCancelAllRequest(
exchange_id = 'KRAKEN'
)
else :
return OrderCancelAllRequest(
exchange_id = 'KRAKEN',
)
def testOrderCancelAllRequest(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true | true |
1c4ad4df6ff6671740e265a9a52975b025f45096 | 7,383 | py | Python | userbot/__init__.py | ferryumay/OpenUserBot | d259a55f6d1ac01ba5015b83751dc30548a7aa98 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-04-09T01:11:33.000Z | 2020-04-09T01:11:33.000Z | userbot/__init__.py | ferryumay/OpenUserBot | d259a55f6d1ac01ba5015b83751dc30548a7aa98 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/__init__.py | ferryumay/OpenUserBot | d259a55f6d1ac01ba5015b83751dc30548a7aa98 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None))
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/mkaraniya/OpenUserBot.git")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# set blacklist_chats where you do not want userbot's features
UB_BLACK_LIST_CHAT = os.environ.get("UB_BLACK_LIST_CHAT", "")
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Quotes API Token
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
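# A rough usage sketch, assuming valid Last.fm credentials above:
# lastfm.get_user(LASTFM_USERNAME).get_now_playing() would return the track the
# configured user is currently scrobbling (or None if nothing is playing).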
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS_API_TOKEN = os.environ.get("GENIUS", None)
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS = os.environ.get("GENIUS_API_TOKEN", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
| 32.240175 | 143 | 0.720439 |
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
STRING_SESSION = os.environ.get("STRING_SESSION", None)
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None))
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/mkaraniya/OpenUserBot.git")
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
DB_URI = os.environ.get("DATABASE_URL", None)
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
UB_BLACK_LIST_CHAT = os.environ.get("UB_BLACK_LIST_CHAT", "")
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Quotes API Token
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS_API_TOKEN = os.environ.get("GENIUS", None)
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS = os.environ.get("GENIUS_API_TOKEN", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
| true | true |
1c4ad6f2ead8fd5590dda5ddff82e834af7eaa24 | 1,825 | py | Python | decrypt.py | tjmovie/psse-decrypt | f2c8f09738185766f8b04253f79f627ae433de7a | [
"Unlicense"
] | null | null | null | decrypt.py | tjmovie/psse-decrypt | f2c8f09738185766f8b04253f79f627ae433de7a | [
"Unlicense"
] | null | null | null | decrypt.py | tjmovie/psse-decrypt | f2c8f09738185766f8b04253f79f627ae433de7a | [
"Unlicense"
] | null | null | null | import os
import binascii
import sys
from Crypto.Cipher import AES
game_key = b""
def ReadGameKey(filename):
global game_key
riffd = open(filename, "rb")
riffd.seek(0x120)
game_key = riffd.read(0x10)
riffd.close()
def DecryptFile(filename):
fd = open(filename, "rb")
header = fd.read(4)
if header != b"PSSE" and header != b"PSME":
print(filename + " Not a PSSE File")
exit()
fd.seek(0x50, 0)
enc1 = fd.read(0x20)
fd.seek(0x70, 0)
enc2 = fd.read(0x10)
fd.seek(0x680, 0)
file_data = fd.read()
fd.close()
iv = b"\x00\x01\x02\x03\04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
key = b"\x4E\x29\x8B\x40\xF5\x31\xF4\x69\xD2\x1F\x75\xB1\x33\xC3\x07\xBE"
cipher = AES.new(key, AES.MODE_CBC, iv)
dec1 = cipher.decrypt(enc1)
cipher = AES.new(key, AES.MODE_CBC, iv)
iv2 = cipher.decrypt(enc2)
cipher = AES.new(game_key, AES.MODE_CBC, iv2)
game_data_dec = cipher.decrypt(file_data)
return game_data_dec
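# Layout assumed by DecryptFile above: bytes 0x50-0x6F decrypt with the hard-coded
# key/IV, bytes 0x70-0x7F decrypt to the per-file IV, and the payload from offset
# 0x680 onward decrypts with the game key read from FAKE.RIF at offset 0x120.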
file = sys.argv[1]
ReadGameKey(file+"\\RO\\License\\FAKE.RIF")
print("Reading Game key from FAKE.RIF: "+binascii.hexlify(game_key).decode("UTF-8"))
FileData = DecryptFile(file+"\\RO\\Application\\psse.list")
FilesList = FileData.replace(b"\r", b"").split(b"\n")
for File in FilesList:
if File == b"":
continue
File = File.replace(b"/", b"\\")
path = file.encode("UTF-8")
FilePath = path+b"\\RO\\Application\\"+File
print((b"Decrypting: "+FilePath).decode("UTF-8"))
if os.path.exists(FilePath):
FileData = DecryptFile(FilePath)
open(FilePath, "wb").write(FileData)
else:
print("Error: File not Found")
open(file+"\\RO\\Application\\psse.list", "wb").write(FileData)
print("Done") | 29.435484 | 85 | 0.610411 | import os
import binascii
import sys
from Crypto.Cipher import AES
game_key = b""
def ReadGameKey(filename):
global game_key
riffd = open(filename, "rb")
riffd.seek(0x120)
game_key = riffd.read(0x10)
riffd.close()
def DecryptFile(filename):
fd = open(filename, "rb")
header = fd.read(4)
if header != b"PSSE" and header != b"PSME":
print(filename + " Not a PSSE File")
exit()
fd.seek(0x50, 0)
enc1 = fd.read(0x20)
fd.seek(0x70, 0)
enc2 = fd.read(0x10)
fd.seek(0x680, 0)
file_data = fd.read()
fd.close()
iv = b"\x00\x01\x02\x03\04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
key = b"\x4E\x29\x8B\x40\xF5\x31\xF4\x69\xD2\x1F\x75\xB1\x33\xC3\x07\xBE"
cipher = AES.new(key, AES.MODE_CBC, iv)
dec1 = cipher.decrypt(enc1)
cipher = AES.new(key, AES.MODE_CBC, iv)
iv2 = cipher.decrypt(enc2)
cipher = AES.new(game_key, AES.MODE_CBC, iv2)
game_data_dec = cipher.decrypt(file_data)
return game_data_dec
file = sys.argv[1]
ReadGameKey(file+"\\RO\\License\\FAKE.RIF")
print("Reading Game key from FAKE.RIF: "+binascii.hexlify(game_key).decode("UTF-8"))
FileData = DecryptFile(file+"\\RO\\Application\\psse.list")
FilesList = FileData.replace(b"\r", b"").split(b"\n")
for File in FilesList:
if File == b"":
continue
File = File.replace(b"/", b"\\")
path = file.encode("UTF-8")
FilePath = path+b"\\RO\\Application\\"+File
print((b"Decrypting: "+FilePath).decode("UTF-8"))
if os.path.exists(FilePath):
FileData = DecryptFile(FilePath)
open(FilePath, "wb").write(FileData)
else:
print("Error: File not Found")
open(file+"\\RO\\Application\\psse.list", "wb").write(FileData)
print("Done") | true | true |
1c4ad74c2747e25780643d5004eefc4b2adda9e9 | 8,693 | py | Python | webservice/utils/analyze_buildLog.py | randytli/Paddle-bot | c4dc7044a82047f46d47733ad31c10eab0417b4b | [
"Apache-2.0"
] | 1 | 2020-05-27T05:21:40.000Z | 2020-05-27T05:21:40.000Z | webservice/utils/analyze_buildLog.py | randytli/Paddle-bot | c4dc7044a82047f46d47733ad31c10eab0417b4b | [
"Apache-2.0"
] | null | null | null | webservice/utils/analyze_buildLog.py | randytli/Paddle-bot | c4dc7044a82047f46d47733ad31c10eab0417b4b | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from utils.readConfig import ReadConfig
from utils.auth_ipipe import Get_ipipe_auth
from utils.db import Database
from utils import bosclient
import os
import time
import datetime
import logging
from tornado.httpclient import AsyncHTTPClient
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
localConfig = ReadConfig()
logging.basicConfig(
level=logging.INFO,
filename='./logs/event.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def ifDocumentFix(message):
document_fix = True if 'test=document_fix' in message else False
return document_fix
def ifAlreadyExist(query_stat):
db = Database()
result = list(db.query(query_stat))
queryTime = ''
if len(result) != 0:
queryTime = result[0][0]['time'].split('.')[0].replace('T', ' ')
queryTime = time.strptime(queryTime, '%Y-%m-%d %H:%M:%S')
dt = datetime.datetime.fromtimestamp(time.mktime(queryTime))
actualQueryTime = (
dt + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")
timeArray = time.strptime(actualQueryTime, "%Y-%m-%d %H:%M:%S")
queryTime = int(time.mktime(timeArray))
return queryTime
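# The +8 hour shift above appears to convert the stored UTC timestamp to China
# Standard Time before comparing; this is an inference from the code itself.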
def generateCiIndex(repo, sha, target_url):
if target_url.startswith('http://10.87.145.41:8111'):
analyze_teamcity_log(target_url)
elif target_url.startswith('https://xly.bce.baidu.com'):
analyze_ipipe_log(sha, target_url)
def analyze_teamcity_log(target_url):
pass
def analyze_ipipe_log(sha, target_url):
index_dict = {}
pipelineBuildid = target_url.split('/')[-3]
stage_url = localConfig.cf.get('ipipeConf', 'stage_url') + pipelineBuildid
session, req = Get_ipipe_auth(stage_url)
try:
res = session.send(req).json()
except Exception as e:
print("Error: %s" % e)
else:
pipelineConfName = res['pipelineConfName']
jobGroupBuildBeans = res['pipelineBuildBean']['stageBuildBeans'][0][
'jobGroupBuildBeans'][0]
PR = res['pipelineBuildBean']['stageBuildBeans'][0]['outParams'][
'AGILE_PULL_ID']
createTime = get_commit_createTime(PR, sha)
index_dict['PR'] = int(PR)
index_dict['commitId'] = sha
index_dict['createTime'] = createTime
for job in jobGroupBuildBeans:
jobName = job['jobName']
if jobName not in ['构建镜像', 'build-docker-image']:
logParam = job['realJobBuild']['logUrl']
startTime = int(str(job['startTime'])[:-3])
endTime = int(str(job['endTime'])[:-3])
index_dict['startTime'] = startTime
index_dict['endTime'] = endTime
logUrl = localConfig.cf.get('ipipeConf', 'log_url') + logParam
getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl)
def getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl):
try:
r = requests.get(logUrl)
except Exception as e:
print("Error: %s" % e)
else:
with open("buildLog/%s_%s.log" % (pipelineConfName, sha), "wb") as f:
f.write(r.content)
f.close()
get_index(index_dict, sha, pipelineConfName)
os.remove("buildLog/%s_%s.log" % (pipelineConfName, sha))
def get_index(index_dict, sha, pipelineConfName):
ifInsert = True
db = Database()
filename = '%s_%s.log' % (pipelineConfName, sha)
index_dict['ciName'] = pipelineConfName
f = open('buildLog/%s' % filename, 'r')
logger.info('filename: %s; PR: %s' % (filename, index_dict['PR']))
data = f.read()
buildTime_strlist = data.split('Build Time:', 1)
buildTime = buildTime_strlist[1:][0].split('s')[0].strip()
index_dict['buildTime'] = float(buildTime)
if filename.startswith('PR-CI-Inference'):
fluidInferenceSize_strlist = data.split('FLuid_Inference Size:', 1)
fluidInferenceSize = fluidInferenceSize_strlist[1:][0].split('M')[
0].strip()
index_dict['fluidInferenceSize'] = float(fluidInferenceSize)
testFluidLibTime_strlist = data.split('test_fluid_lib Total Time:', 1)
testFluidLibTime = testFluidLibTime_strlist[1:][0].split('s')[0].strip(
)
index_dict['testFluidLibTime'] = float(testFluidLibTime)
testFluidLibTrainTime_strlist = data.split(
'test_fluid_lib_train Total Time:', 1)
testFluidLibTrainTime = testFluidLibTrainTime_strlist[1:][0].split(
's')[0].strip()
index_dict['testFluidLibTrainTime'] = float(testFluidLibTrainTime)
elif filename.startswith('PR-CI-Coverage') or filename.startswith(
'PR-CI-Py35'):
buildSize_strlist = data.split('Build Size:', 1)
buildSize = buildSize_strlist[1:][0].split('G')[0].strip()
index_dict['buildSize'] = float(buildSize)
WhlSize_strlist = data.split('PR whl Size:', 1)
WhlSize = WhlSize_strlist[1:][0].split('M')[0].strip()
index_dict['WhlSize'] = float(WhlSize)
testCaseCount_single_strlist = data.split('1 card TestCases count is')
testCaseCount_single = 0
        for item in testCaseCount_single_strlist[1:]:  # single-card cases are split into two parts
testCaseCount_single += int(item.split('\n')[0].strip())
index_dict['testCaseCount_single'] = testCaseCount_single
testCaseCount_multi_strlist = data.split('2 card TestCases count is')
testCaseCount_multi = int(testCaseCount_multi_strlist[1:][0].split(
'\n')[0].strip())
index_dict['testCaseCount_multi'] = testCaseCount_multi
testCaseCount_exclusive_strlist = data.split(
'exclusive TestCases count is')
testCaseCount_exclusive = int(testCaseCount_exclusive_strlist[1:][0]
.split('\n')[0].strip())
index_dict['testCaseCount_exclusive'] = testCaseCount_exclusive
testCaseCount_total = testCaseCount_single + testCaseCount_multi + testCaseCount_exclusive
index_dict['testCaseCount_total'] = testCaseCount_total
testCaseTime_single_strlist = data.split(
'1 card TestCases Total Time:')
testCaseTime_single = 0
        for item in testCaseTime_single_strlist[1:]:  # single-card cases are split into two parts
testCaseTime_single += int(item.split('s')[0].strip())
index_dict['testCaseTime_single'] = testCaseTime_single
testCaseTime_multi_strlist = data.split('2 card TestCases Total Time:')
testCaseTime_multi = int(testCaseTime_multi_strlist[1:][0].split('s')[
0].strip())
index_dict['testCaseTime_multi'] = testCaseTime_multi
testCaseTime_exclusive_strlist = data.split(
'exclusive TestCases Total Time:')
testCaseTime_exclusive = int(testCaseTime_exclusive_strlist[1:][0]
.split('s')[0].strip())
index_dict['testCaseTime_exclusive'] = testCaseTime_exclusive
testCaseTime_total_strlist = data.split('TestCases Total Time:')
testCaseTime_total = 0
for item in testCaseTime_total_strlist[1:]:
testCaseTime_total = int(item.split('s')[0].strip()) if int(
item.split('s')[0].strip(
)) > testCaseTime_total else testCaseTime_total
index_dict['testCaseTime_total'] = testCaseTime_total
insertTime = int(time.time())
query_stat = "SELECT * FROM paddle_ci_index WHERE ciName='%s' and commitId='%s' and PR=%s order by time desc" % (
index_dict['ciName'], index_dict['commitId'], index_dict['PR'])
queryTime = ifAlreadyExist(query_stat)
if queryTime != '':
ifInsert = False if insertTime - queryTime < 30 else True
if ifInsert == True:
result = db.insert('paddle_ci_index', index_dict)
if result == True:
logger.info('%s %s %s insert paddle_ci_index success!' %
(pipelineConfName, index_dict['PR'], sha))
else:
logger.info('%s %s %s insert paddle_ci_index failed!' %
(pipelineConfName, index_dict['PR'], sha))
def get_commit_createTime(PR, sha):
"""get commit createtime"""
query_stat = "SELECT createTime FROM commit_create_time WHERE PR=%s and commitId='%s'" % (
PR, sha)
db = Database()
result = list(db.query(query_stat))
if len(result) != 0:
createTime = result[0][0]['createTime']
else:
logger.error("The commit created before 2020-07-03 17:10: %s, %s" %
(PR, sha))
createTime = 0
return createTime
| 43.683417 | 117 | 0.644081 | import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from utils.readConfig import ReadConfig
from utils.auth_ipipe import Get_ipipe_auth
from utils.db import Database
from utils import bosclient
import os
import time
import datetime
import logging
from tornado.httpclient import AsyncHTTPClient
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
localConfig = ReadConfig()
logging.basicConfig(
level=logging.INFO,
filename='./logs/event.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def ifDocumentFix(message):
document_fix = True if 'test=document_fix' in message else False
return document_fix
def ifAlreadyExist(query_stat):
db = Database()
result = list(db.query(query_stat))
queryTime = ''
if len(result) != 0:
queryTime = result[0][0]['time'].split('.')[0].replace('T', ' ')
queryTime = time.strptime(queryTime, '%Y-%m-%d %H:%M:%S')
dt = datetime.datetime.fromtimestamp(time.mktime(queryTime))
actualQueryTime = (
dt + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")
timeArray = time.strptime(actualQueryTime, "%Y-%m-%d %H:%M:%S")
queryTime = int(time.mktime(timeArray))
return queryTime
def generateCiIndex(repo, sha, target_url):
if target_url.startswith('http://10.87.145.41:8111'):
analyze_teamcity_log(target_url)
elif target_url.startswith('https://xly.bce.baidu.com'):
analyze_ipipe_log(sha, target_url)
def analyze_teamcity_log(target_url):
pass
def analyze_ipipe_log(sha, target_url):
index_dict = {}
pipelineBuildid = target_url.split('/')[-3]
stage_url = localConfig.cf.get('ipipeConf', 'stage_url') + pipelineBuildid
session, req = Get_ipipe_auth(stage_url)
try:
res = session.send(req).json()
except Exception as e:
print("Error: %s" % e)
else:
pipelineConfName = res['pipelineConfName']
jobGroupBuildBeans = res['pipelineBuildBean']['stageBuildBeans'][0][
'jobGroupBuildBeans'][0]
PR = res['pipelineBuildBean']['stageBuildBeans'][0]['outParams'][
'AGILE_PULL_ID']
createTime = get_commit_createTime(PR, sha)
index_dict['PR'] = int(PR)
index_dict['commitId'] = sha
index_dict['createTime'] = createTime
for job in jobGroupBuildBeans:
jobName = job['jobName']
if jobName not in ['构建镜像', 'build-docker-image']:
logParam = job['realJobBuild']['logUrl']
startTime = int(str(job['startTime'])[:-3])
endTime = int(str(job['endTime'])[:-3])
index_dict['startTime'] = startTime
index_dict['endTime'] = endTime
logUrl = localConfig.cf.get('ipipeConf', 'log_url') + logParam
getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl)
def getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl):
try:
r = requests.get(logUrl)
except Exception as e:
print("Error: %s" % e)
else:
with open("buildLog/%s_%s.log" % (pipelineConfName, sha), "wb") as f:
f.write(r.content)
f.close()
get_index(index_dict, sha, pipelineConfName)
os.remove("buildLog/%s_%s.log" % (pipelineConfName, sha))
def get_index(index_dict, sha, pipelineConfName):
ifInsert = True
db = Database()
filename = '%s_%s.log' % (pipelineConfName, sha)
index_dict['ciName'] = pipelineConfName
f = open('buildLog/%s' % filename, 'r')
logger.info('filename: %s; PR: %s' % (filename, index_dict['PR']))
data = f.read()
buildTime_strlist = data.split('Build Time:', 1)
buildTime = buildTime_strlist[1:][0].split('s')[0].strip()
index_dict['buildTime'] = float(buildTime)
if filename.startswith('PR-CI-Inference'):
fluidInferenceSize_strlist = data.split('FLuid_Inference Size:', 1)
fluidInferenceSize = fluidInferenceSize_strlist[1:][0].split('M')[
0].strip()
index_dict['fluidInferenceSize'] = float(fluidInferenceSize)
testFluidLibTime_strlist = data.split('test_fluid_lib Total Time:', 1)
testFluidLibTime = testFluidLibTime_strlist[1:][0].split('s')[0].strip(
)
index_dict['testFluidLibTime'] = float(testFluidLibTime)
testFluidLibTrainTime_strlist = data.split(
'test_fluid_lib_train Total Time:', 1)
testFluidLibTrainTime = testFluidLibTrainTime_strlist[1:][0].split(
's')[0].strip()
index_dict['testFluidLibTrainTime'] = float(testFluidLibTrainTime)
elif filename.startswith('PR-CI-Coverage') or filename.startswith(
'PR-CI-Py35'):
buildSize_strlist = data.split('Build Size:', 1)
buildSize = buildSize_strlist[1:][0].split('G')[0].strip()
index_dict['buildSize'] = float(buildSize)
WhlSize_strlist = data.split('PR whl Size:', 1)
WhlSize = WhlSize_strlist[1:][0].split('M')[0].strip()
index_dict['WhlSize'] = float(WhlSize)
testCaseCount_single_strlist = data.split('1 card TestCases count is')
testCaseCount_single = 0
for item in testCaseCount_single_strlist[1:]: testCaseCount_single += int(item.split('\n')[0].strip())
index_dict['testCaseCount_single'] = testCaseCount_single
testCaseCount_multi_strlist = data.split('2 card TestCases count is')
testCaseCount_multi = int(testCaseCount_multi_strlist[1:][0].split(
'\n')[0].strip())
index_dict['testCaseCount_multi'] = testCaseCount_multi
testCaseCount_exclusive_strlist = data.split(
'exclusive TestCases count is')
testCaseCount_exclusive = int(testCaseCount_exclusive_strlist[1:][0]
.split('\n')[0].strip())
index_dict['testCaseCount_exclusive'] = testCaseCount_exclusive
testCaseCount_total = testCaseCount_single + testCaseCount_multi + testCaseCount_exclusive
index_dict['testCaseCount_total'] = testCaseCount_total
testCaseTime_single_strlist = data.split(
'1 card TestCases Total Time:')
testCaseTime_single = 0
for item in testCaseTime_single_strlist[1:]: testCaseTime_single += int(item.split('s')[0].strip())
index_dict['testCaseTime_single'] = testCaseTime_single
testCaseTime_multi_strlist = data.split('2 card TestCases Total Time:')
testCaseTime_multi = int(testCaseTime_multi_strlist[1:][0].split('s')[
0].strip())
index_dict['testCaseTime_multi'] = testCaseTime_multi
testCaseTime_exclusive_strlist = data.split(
'exclusive TestCases Total Time:')
testCaseTime_exclusive = int(testCaseTime_exclusive_strlist[1:][0]
.split('s')[0].strip())
index_dict['testCaseTime_exclusive'] = testCaseTime_exclusive
testCaseTime_total_strlist = data.split('TestCases Total Time:')
testCaseTime_total = 0
for item in testCaseTime_total_strlist[1:]:
testCaseTime_total = int(item.split('s')[0].strip()) if int(
item.split('s')[0].strip(
)) > testCaseTime_total else testCaseTime_total
index_dict['testCaseTime_total'] = testCaseTime_total
insertTime = int(time.time())
query_stat = "SELECT * FROM paddle_ci_index WHERE ciName='%s' and commitId='%s' and PR=%s order by time desc" % (
index_dict['ciName'], index_dict['commitId'], index_dict['PR'])
queryTime = ifAlreadyExist(query_stat)
if queryTime != '':
ifInsert = False if insertTime - queryTime < 30 else True
if ifInsert == True:
result = db.insert('paddle_ci_index', index_dict)
if result == True:
logger.info('%s %s %s insert paddle_ci_index success!' %
(pipelineConfName, index_dict['PR'], sha))
else:
logger.info('%s %s %s insert paddle_ci_index failed!' %
(pipelineConfName, index_dict['PR'], sha))
def get_commit_createTime(PR, sha):
query_stat = "SELECT createTime FROM commit_create_time WHERE PR=%s and commitId='%s'" % (
PR, sha)
db = Database()
result = list(db.query(query_stat))
if len(result) != 0:
createTime = result[0][0]['createTime']
else:
logger.error("The commit created before 2020-07-03 17:10: %s, %s" %
(PR, sha))
createTime = 0
return createTime
| true | true |
1c4ad7bb528c874b879b1da41e20cceb700b3a0a | 3,651 | py | Python | mmfewshot/detection/models/roi_heads/bbox_heads/meta_bbox_head.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 376 | 2021-11-23T13:29:57.000Z | 2022-03-30T07:22:14.000Z | mmfewshot/detection/models/roi_heads/bbox_heads/meta_bbox_head.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 51 | 2021-11-23T14:45:08.000Z | 2022-03-30T03:37:15.000Z | mmfewshot/detection/models/roi_heads/bbox_heads/meta_bbox_head.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 56 | 2021-11-23T14:02:27.000Z | 2022-03-31T09:01:50.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, Optional
import torch
import torch.nn as nn
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from mmdet.models.roi_heads import BBoxHead
from torch import Tensor
@HEADS.register_module()
class MetaBBoxHead(BBoxHead):
"""BBoxHead with meta classification for metarcnn and fsdetview.
Args:
num_meta_classes (int): Number of classes for meta classification.
meta_cls_in_channels (int): Number of support feature channels.
with_meta_cls_loss (bool): Use meta classification loss.
Default: True.
meta_cls_loss_weight (float | None): The loss weight of `loss_meta`.
Default: None.
loss_meta (dict): Config for meta classification loss.
"""
def __init__(self,
num_meta_classes: int,
meta_cls_in_channels: int = 2048,
with_meta_cls_loss: bool = True,
meta_cls_loss_weight: Optional[float] = None,
loss_meta: Dict = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.with_meta_cls_loss = with_meta_cls_loss
if with_meta_cls_loss:
self.fc_meta = nn.Linear(meta_cls_in_channels, num_meta_classes)
self.meta_cls_loss_weight = meta_cls_loss_weight
self.loss_meta_cls = build_loss(copy.deepcopy(loss_meta))
def forward_meta_cls(self, support_feat: Tensor) -> Tensor:
"""Forward function for meta classification.
Args:
support_feat (Tensor): Shape of (N, C, H, W).
Returns:
Tensor: Box scores with shape of (N, num_meta_classes, H, W).
"""
meta_cls_score = self.fc_meta(support_feat)
return meta_cls_score
@force_fp32(apply_to='meta_cls_score')
def loss_meta(self,
meta_cls_score: Tensor,
meta_cls_labels: Tensor,
meta_cls_label_weights: Tensor,
reduction_override: Optional[str] = None) -> Dict:
"""Meta classification loss.
Args:
meta_cls_score (Tensor): Predicted meta classification scores
with shape (N, num_meta_classes).
meta_cls_labels (Tensor): Corresponding class indices with
shape (N).
meta_cls_label_weights (Tensor): Meta classification loss weight
of each sample with shape (N).
reduction_override (str | None): The reduction method used to
override the original reduction method of the loss. Options
are "none", "mean" and "sum". Default: None.
Returns:
Dict: The calculated loss.
"""
losses = dict()
if self.meta_cls_loss_weight is None:
loss_weight = 1. / max(
torch.sum(meta_cls_label_weights > 0).float().item(), 1.)
else:
loss_weight = self.meta_cls_loss_weight
if meta_cls_score.numel() > 0:
loss_meta_cls_ = self.loss_meta_cls(
meta_cls_score,
meta_cls_labels,
meta_cls_label_weights,
reduction_override=reduction_override)
losses['loss_meta_cls'] = loss_meta_cls_ * loss_weight
losses['meta_acc'] = accuracy(meta_cls_score, meta_cls_labels)
return losses
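# A minimal usage sketch, assuming an mmdet/mmfewshot environment where this head
# is registered; the shapes and class count are placeholders:
# head = MetaBBoxHead(num_meta_classes=15, meta_cls_in_channels=2048)
# score = head.forward_meta_cls(torch.randn(4, 2048))            # -> (4, 15)
# losses = head.loss_meta(score, torch.randint(0, 15, (4,)), torch.ones(4))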
| 38.431579 | 76 | 0.612435 | import copy
from typing import Dict, Optional
import torch
import torch.nn as nn
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from mmdet.models.roi_heads import BBoxHead
from torch import Tensor
@HEADS.register_module()
class MetaBBoxHead(BBoxHead):
def __init__(self,
num_meta_classes: int,
meta_cls_in_channels: int = 2048,
with_meta_cls_loss: bool = True,
meta_cls_loss_weight: Optional[float] = None,
loss_meta: Dict = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.with_meta_cls_loss = with_meta_cls_loss
if with_meta_cls_loss:
self.fc_meta = nn.Linear(meta_cls_in_channels, num_meta_classes)
self.meta_cls_loss_weight = meta_cls_loss_weight
self.loss_meta_cls = build_loss(copy.deepcopy(loss_meta))
def forward_meta_cls(self, support_feat: Tensor) -> Tensor:
meta_cls_score = self.fc_meta(support_feat)
return meta_cls_score
@force_fp32(apply_to='meta_cls_score')
def loss_meta(self,
meta_cls_score: Tensor,
meta_cls_labels: Tensor,
meta_cls_label_weights: Tensor,
reduction_override: Optional[str] = None) -> Dict:
losses = dict()
if self.meta_cls_loss_weight is None:
loss_weight = 1. / max(
torch.sum(meta_cls_label_weights > 0).float().item(), 1.)
else:
loss_weight = self.meta_cls_loss_weight
if meta_cls_score.numel() > 0:
loss_meta_cls_ = self.loss_meta_cls(
meta_cls_score,
meta_cls_labels,
meta_cls_label_weights,
reduction_override=reduction_override)
losses['loss_meta_cls'] = loss_meta_cls_ * loss_weight
losses['meta_acc'] = accuracy(meta_cls_score, meta_cls_labels)
return losses
| true | true |
1c4ada067ccaae4dfb745c521b71812b9ecd0997 | 81,491 | py | Python | imjoy_elfinder/elfinder.py | oeway/jupyter_elfinder | e89eb535b6bdc9024a1af57ecc67056ce9b5a94f | [
"MIT"
] | null | null | null | imjoy_elfinder/elfinder.py | oeway/jupyter_elfinder | e89eb535b6bdc9024a1af57ecc67056ce9b5a94f | [
"MIT"
] | null | null | null | imjoy_elfinder/elfinder.py | oeway/jupyter_elfinder | e89eb535b6bdc9024a1af57ecc67056ce9b5a94f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Connector for elFinder File Manager
# Original author Troex Nevelin <[email protected]>
# Modified by Svintsov Dmitry (https://github.com/uralbash)
# Further adapted by ImJoy Team (https://github.com/imjoy-team)
# License: 3-clauses BSD license
"""Provide the connector for elFinder File Manager."""
# pylint: disable=too-many-lines
import base64
import hashlib
import mimetypes
import os
import re
import shlex
import shutil
import subprocess
import time
import traceback
import uuid
from datetime import datetime
from types import ModuleType
from typing import Any, BinaryIO, Dict, Generator, List, Optional, Tuple, Union
from urllib.parse import quote, urljoin
from pathvalidate import sanitize_filename, sanitize_filepath
from typing_extensions import Literal, TypedDict
from .api_const import (
API_CMD,
API_CONTENT,
API_CURRENT,
API_CUT,
API_CHUNK,
API_CID,
API_DIRS,
API_DOWNLOAD,
API_DST,
API_HEIGHT,
API_INIT,
API_INTERSECT,
API_MAKEDIR,
API_MIMES,
API_NAME,
API_Q,
API_SRC,
API_TARGET,
API_TARGETS,
API_TREE,
API_TYPE,
API_UPLOAD,
API_UPLOAD_PATH,
API_WIDTH,
API_RANGE,
ARCHIVE_ARGC,
ARCHIVE_CMD,
ARCHIVE_EXT,
R_ADDED,
R_API,
R_CHANGED,
R_CHUNKMERGED,
R_CWD,
R_DEBUG,
R_DIM,
R_DIR_CNT,
R_ERROR,
R_FILE_CNT,
R_FILES,
R_HASHES,
R_IMAGES,
R_LIST,
R_NAME,
R_NETDRIVERS,
R_OPTIONS,
R_OPTIONS_ARCHIVERS,
R_OPTIONS_COPY_OVERWRITE,
R_OPTIONS_CREATE,
R_OPTIONS_CREATE_EXT,
R_OPTIONS_DISABLED,
R_OPTIONS_DISP_INLINE_REGEX,
R_OPTIONS_EXTRACT,
R_OPTIONS_I18N_FOLDER_NAME,
R_OPTIONS_JPG_QUALITY,
R_OPTIONS_MIME_ALLOW,
R_OPTIONS_MIME_DENY,
R_OPTIONS_MIME_FIRST_ORDER,
R_OPTIONS_PATH,
R_OPTIONS_SEPARATOR,
R_OPTIONS_SYNC_CHK_AS_TS,
R_OPTIONS_SYNC_MIN_MS,
R_OPTIONS_TMB_URL,
R_OPTIONS_UI_CMD_MAP,
R_OPTIONS_UPLOAD_MAX_CONN,
R_OPTIONS_UPLOAD_MAX_SIZE,
R_OPTIONS_UPLOAD_MIME,
R_OPTIONS_UPLOAD_OVERWRITE,
R_OPTIONS_URL,
R_REMOVED,
R_SIZE,
R_SIZES,
R_TREE,
R_UPLMAXFILE,
R_UPLMAXSIZE,
R_WARNING,
)
COMMANDS = {
"archive": "__archive",
"chmod": "__chmod",
"dim": "__dim",
"duplicate": "__duplicate",
"extract": "__extract",
"file": "__file",
"get": "__get",
"info": "__places",
"ls": "__ls",
"mkdir": "__mkdir",
"mkfile": "__mkfile",
"netmount": "__netmount",
"open": "__open",
"parents": "__parents",
"paste": "__paste",
"ping": "__ping",
"put": "__put",
"reload": "__reload", # not implemented
"rename": "__rename",
"resize": "__resize",
"rm": "__rm",
"search": "__search",
"size": "__size",
"tmb": "__thumbnails",
"tree": "__tree",
"upload": "__upload",
"zipdl": "__zipdl",
}
MIME_TYPES = {
# text
".cfg": "text/plain",
".conf": "text/plain",
".css": "text/css",
".htm": "text/html",
".html": "text/html",
".ini": "text/plain",
".java": "text/x-java-source",
".js": "text/javascript",
".md": "text/markdown",
".php": "text/x-php",
".pl": "text/x-perl",
".py": "text/x-python",
".rb": "text/x-ruby",
".rtf": "text/rtf",
".rtfd": "text/rtfd",
".sh": "text/x-shellscript",
".sql": "text/x-sql",
".txt": "text/plain",
# apps
".7z": "application/x-7z-compressed",
".doc": "application/msword",
".ogg": "application/ogg",
# video
".mkv": "video/x-matroska",
".ogm": "application/ogm",
}
Archivers = TypedDict( # pylint: disable=invalid-name
"Archivers",
{"create": Dict[str, Dict[str, str]], "extract": Dict[str, Dict[str, str]]},
)
Info = TypedDict( # pylint: disable=invalid-name
"Info",
{
"alias": str,
"dim": str,
"dirs": int,
"hash": str,
"link": str,
"locked": int,
"mime": str,
"name": str,
"path": str,
"phash": str,
"read": int,
"size": int,
"tmb": str,
"ts": float,
"url": str,
"volumeid": str,
"write": int,
},
total=False,
)
Options = TypedDict( # pylint: disable=invalid-name
"Options",
{
"archive_mimes": List[str],
"archivers": Archivers,
"base_url": str,
"debug": bool,
"defaults": Dict[str, bool],
"dir_mode": Literal[493],
"dir_size": bool,
"disabled": List[str],
"dot_files": bool,
"expose_real_path": bool,
"file_mode": Literal[420],
"file_url": bool,
"files_url": str,
"img_lib": Optional[str],
"max_folder_depth": int,
"perms": Dict[str, Dict[str, bool]],
"root_alias": str,
"root": str,
"tmb_at_once": int,
"tmb_dir": Optional[str],
"tmb_size": int,
"upload_allow": List[str],
"upload_deny": List[str],
"upload_max_conn": int,
"upload_max_size": int,
"upload_order": List[Literal["deny", "allow"]],
"upload_write_chunk": int,
},
)
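# Note: Options uses the functional TypedDict syntax without total=False, so every
# key listed above is required whenever a full Options mapping is constructed.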
def exception_to_string(excp: Exception) -> str:
"""Convert exception to string."""
stack = traceback.extract_stack()[:-3] + traceback.extract_tb(
excp.__traceback__
) # add limit=??
pretty = traceback.format_list(stack)
return "".join(pretty) + f"\n {excp.__class__} {excp}"
class Connector:
"""Connector for elFinder."""
# pylint: disable=too-many-instance-attributes, too-many-arguments
# pylint: disable=unused-private-member
# The options need to be persistent between connector instances.
_options = {
"archive_mimes": [],
"archivers": {"create": {}, "extract": {}},
"base_url": "",
"debug": False,
"defaults": {"read": True, "write": True, "rm": True},
"dir_mode": 0o755,
"dir_size": False,
"disabled": ["netmount", "zipdl"],
"dot_files": False,
"expose_real_path": False,
"file_mode": 0o644,
"file_url": True,
"files_url": "",
"img_lib": "auto",
"max_folder_depth": 256,
"perms": {},
"root_alias": "HOME",
"root": "",
"tmb_at_once": 5,
"tmb_dir": ".tmb",
"tmb_size": 48,
"upload_allow": [],
"upload_deny": [],
"upload_max_conn": -1,
"upload_max_size": 256 * 1024 * 1024,
"upload_order": ["deny", "allow"],
"upload_write_chunk": 8192,
} # type: Options
# The cache needs to be persistent between connector instances.
_cached_path = {} # type: Dict[str, str]
# public variables
http_allowed_parameters = (
API_CHUNK,
API_CID,
API_CMD,
API_CONTENT,
API_CURRENT,
API_CUT,
API_DIRS,
API_DOWNLOAD,
API_DST,
API_HEIGHT,
API_INIT,
API_MAKEDIR,
API_NAME,
API_Q,
API_RANGE,
API_SRC,
API_TARGET,
API_TARGETS,
API_TREE,
API_TYPE,
API_UPLOAD,
API_UPLOAD_PATH,
API_WIDTH,
)
def __init__(
self,
root: str,
url: str,
base_url: str,
upload_max_size: int,
tmb_dir: Optional[str],
expose_real_path: bool = False,
dot_files: bool = False,
debug: bool = False,
) -> None:
"""Set up connector instance."""
self.volumeid = str(uuid.uuid4())
# internal
self._commands = dict(COMMANDS)
self._http_header = {} # type: Dict[str, str]
self._http_status_code = 0
self._request = {} # type: Dict[str, Any]
self._response = {} # type: Dict[str, Any]
self._response[R_DEBUG] = {}
self._error_data = {} # type: Dict[str, str]
self._img = None # type: Optional[ModuleType]
# options
self._options["root"] = self._check_utf8(root)
self._options["upload_max_size"] = upload_max_size
self._options["debug"] = debug
self._options["base_url"] = (
base_url.lstrip("/") if base_url.startswith("//") else base_url
)
self._options["expose_real_path"] = expose_real_path
self._options["dot_files"] = dot_files
self._options["files_url"] = self._check_utf8(url).rstrip("/")
self._debug("files_url", self._options["files_url"])
self._debug("root", self._options["root"])
for cmd in self._options["disabled"]:
if cmd in self._commands:
del self._commands[cmd]
# TODO: Move side effects out of init.
if tmb_dir:
thumbs_dir = os.path.join(self._options["root"], tmb_dir)
try:
if not os.path.exists(thumbs_dir):
os.makedirs(thumbs_dir) # self._options['tmbDir'] = False
self._options["tmb_dir"] = thumbs_dir
except PermissionError:
self._options["tmb_dir"] = None
self._debug("thumbnail", " Permission denied: " + thumbs_dir)
print(
"WARNING: failed to create thumbnail folder "
"due to permission denied, it will be disabled."
)
def run(
self, http_request: Dict[str, Any]
) -> Tuple[int, Dict[str, str], Dict[str, Any]]:
"""Run main function."""
start_time = time.time()
root_ok = True
if not os.path.exists(self._options["root"]):
root_ok = False
self._response[R_ERROR] = "Invalid backend configuration"
elif not self._is_allowed(self._options["root"], "read"):
root_ok = False
self._response[R_ERROR] = "Access denied"
for field in self.http_allowed_parameters:
if field in http_request:
self._request[field] = http_request[field]
if root_ok and API_CMD in self._request:
if self._request[API_CMD] in self._commands:
cmd = self._commands[self._request[API_CMD]]
# A missing command method should blow up here.
func = getattr(self, "_" + self.__class__.__name__ + cmd)
try:
func()
except Exception as exc: # pylint: disable=broad-except
self._response[
R_ERROR
] = f"Command Failed: {self._request[API_CMD]}, Error: \n{exc}"
traceback.print_exc()
self._debug("exception", exception_to_string(exc))
else:
self._response[R_ERROR] = f"Unknown command: {self._request[API_CMD]}"
if self._error_data:
self._debug("errorData", self._error_data)
if self._options["debug"]:
self._debug("time", (time.time() - start_time))
else:
self._response.pop(R_DEBUG, None)
if self._http_status_code < 100:
self._http_status_code = 200
if "Content-type" not in self._http_header:
if API_CMD in self._request and self._request[API_CMD] == "upload":
self._http_header["Content-type"] = "text/html"
else:
self._http_header["Content-type"] = "application/json"
return self._http_status_code, self._http_header, self._response
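    # A rough end-to-end sketch; the paths and request payload are placeholders:
    # connector = Connector(root="/srv/files", url="/files", base_url="/",
    #                       upload_max_size=256 * 1024 * 1024, tmb_dir=".tmb")
    # status, headers, response = connector.run({"cmd": "open", "init": "1"})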
def __places(self) -> None:
if API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
targets = self._request[API_TARGETS]
files = []
for target in targets:
path = self._find(target)
if path is None:
self._set_error_data(target, "File not found")
else:
files.append(self._info(path))
self._response[R_FILES] = files
def __open(self) -> None:
"""Open file or directory."""
path = None
init = self._request.get(API_INIT)
target = self._request.get(API_TARGET)
if not init and not target:
self._response[R_ERROR] = "Invalid parameters"
return
if target:
path = self._find_dir(target)
if init:
self._response[R_API] = 2.1
if not path:
path = self._options["root"]
if not path:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
self._cwd(path)
try:
items = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
files = []
for item in sorted(items):
file_path = os.path.join(path, item)
if self._is_accepted(item):
info = self._info(file_path)
files.append(info)
self._response[R_FILES] = files
if self._request.get(API_TREE):
self._response[R_FILES].append(self._info(path))
self._check_archivers()
if not self._options["file_url"]:
url = ""
else:
url = self._options["files_url"]
self._response[R_NETDRIVERS] = []
self._response[R_UPLMAXFILE] = 1000
self._response[R_UPLMAXSIZE] = (
str(self._options["upload_max_size"] / (1024 * 1024)) + "M"
)
thumbs_dir = self._options["tmb_dir"]
if thumbs_dir:
thumbs_url = self._path2url(thumbs_dir)
else:
thumbs_url = ""
self._response[R_OPTIONS] = {
R_OPTIONS_PATH: path,
R_OPTIONS_SEPARATOR: os.path.sep,
R_OPTIONS_URL: url,
R_OPTIONS_DISABLED: self._options["disabled"],
R_OPTIONS_TMB_URL: thumbs_url,
R_OPTIONS_ARCHIVERS: {
R_OPTIONS_CREATE: list(self._options["archivers"]["create"].keys()),
R_OPTIONS_EXTRACT: list(self._options["archivers"]["extract"].keys()),
R_OPTIONS_CREATE_EXT: {
k: self._options["archivers"]["create"][k][ARCHIVE_EXT]
for k in self._options["archivers"]["create"]
},
},
R_OPTIONS_COPY_OVERWRITE: True,
R_OPTIONS_UPLOAD_MAX_SIZE: self._options["upload_max_size"],
R_OPTIONS_UPLOAD_OVERWRITE: True,
R_OPTIONS_UPLOAD_MAX_CONN: 3,
R_OPTIONS_UPLOAD_MIME: {
R_OPTIONS_MIME_ALLOW: ["all"],
R_OPTIONS_MIME_DENY: [],
R_OPTIONS_MIME_FIRST_ORDER: R_OPTIONS_MIME_DENY,
},
R_OPTIONS_I18N_FOLDER_NAME: True,
R_OPTIONS_DISP_INLINE_REGEX: "^(?:(?:image|video|audio)|application/"
+ "(?:x-mpegURL|dash\\+xml)|(?:text/plain|application/pdf)$)",
R_OPTIONS_JPG_QUALITY: 100,
R_OPTIONS_SYNC_CHK_AS_TS: 1,
R_OPTIONS_SYNC_MIN_MS: 30000,
R_OPTIONS_UI_CMD_MAP: {},
}
def __parents(self) -> None:
# TODO: implement according to the spec
# https://github.com/Studio-42/elFinder/wiki/Client-Server-API-2.1#parents
self._response[R_TREE] = []
def __chmod(self) -> None:
# TODO: implement according to the spec
# https://github.com/Studio-42/elFinder/wiki/Client-Server-API-2.1#chmod
self._response[R_CHANGED] = []
def __netmount(self) -> None:
# TODO: implement according to the spec
# https://github.com/Studio-42/elFinder/wiki/Client-Server-API-2.1#netmount
pass
def __zipdl(self) -> None:
# TODO: implement according to the spec
# https://github.com/Studio-42/elFinder/wiki/Client-Server-API-2.1#zipdl
pass
def __file(self) -> None:
self._http_header["Content-type"] = "text/html"
target = self._request.get(API_TARGET)
if not target:
self._response["__text"] = "Invalid parameters"
return
download = self._request.get(API_DOWNLOAD)
cur_file = self._find(target)
if not cur_file or not os.path.exists(cur_file) or os.path.isdir(cur_file):
self._http_status_code = 404
self._response["__text"] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._http_status_code = 403
self._response["__text"] = "Access denied"
return
if os.path.islink(cur_file):
cur_file = self._read_link(cur_file)
if (
not cur_file
or not self._is_allowed(os.path.dirname(cur_file), "read")
or not self._is_allowed(cur_file, "read")
):
self._http_status_code = 403
self._response["__text"] = "Access denied"
return
mime = _mimetype(cur_file)
parts = mime.split("/", 2)
if download:
disp = "attachments"
elif parts[0] == "image":
disp = "image"
else:
disp = "inline"
self._http_status_code = 200
self._http_header["Content-type"] = mime
self._http_header["Content-Length"] = str(os.lstat(cur_file).st_size)
self._http_header["Content-Disposition"] = disp + ";"
self._response["__send_file"] = cur_file
def __rename(self) -> None:
"""Rename file or dir."""
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
if not (name and target):
self._response[R_ERROR] = "Invalid parameters"
return
cur_name = self._find(target)
if not cur_name:
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(cur_name)
if not self._is_allowed(cur_dir, "write") and self._is_allowed(cur_name, "rm"):
self._response[R_ERROR] = "Access denied"
return
name = self._check_utf8(name)
if not name or not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_name = os.path.join(cur_dir, name)
if os.path.exists(new_name):
self._response[R_ERROR] = (
"File or folder with the same name " + new_name + " already exists"
)
return
self._rm_tmb(cur_name)
try:
os.rename(cur_name, new_name)
self._response[R_ADDED] = [self._info(new_name)]
self._response[R_REMOVED] = [target]
except OSError:
self._response[R_ERROR] = "Unable to rename file"
def __mkdir(self) -> None:
"""Create new directory."""
path = None
new_dir = None
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
dirs = self._request.get(API_DIRS)
if not target or (not name and not dirs):
self._response[R_ERROR] = "Invalid parameters"
return
path = self._find_dir(target)
if not path:
self._response[R_ERROR] = "Invalid parameters"
return
if not self._is_allowed(path, "write"):
self._response[R_ERROR] = "Access denied"
return
if name:
name = self._check_utf8(name)
if not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_dir = os.path.join(path, name)
if os.path.exists(new_dir):
self._response[R_ERROR] = (
"File or folder with the same name " + name + " already exists"
)
else:
try:
os.mkdir(new_dir, int(self._options["dir_mode"]))
self._response[R_ADDED] = [self._info(new_dir)]
self._response[R_HASHES] = {}
except OSError:
self._response[R_ERROR] = "Unable to create folder"
if dirs:
self._response[R_ADDED] = []
self._response[R_HASHES] = {}
for sdir in dirs:
subdir = sdir.lstrip("/")
if not _check_dir(subdir):
self._response[R_ERROR] = "Invalid dir name: " + subdir
return
new_subdir = os.path.join(path, subdir)
if os.path.exists(new_subdir):
self._response[R_ERROR] = (
"File or folder with the same name "
+ subdir
+ " already exists"
)
return
try:
os.mkdir(new_subdir, int(self._options["dir_mode"]))
self._response[R_ADDED].append(self._info(new_subdir))
self._response[R_HASHES][sdir] = self._hash(new_subdir)
except OSError:
self._response[R_ERROR] = "Unable to create folder"
return
def __mkfile(self) -> None:
"""Create new file."""
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
if not target or not name:
self._response[R_ERROR] = "Invalid parameters"
return
name = self._check_utf8(name)
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_ERROR] = "Invalid parameters"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
if not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_file = os.path.join(cur_dir, name)
if os.path.exists(new_file):
self._response[R_ERROR] = "File or folder with the same name already exists"
else:
try:
with open(new_file, "w", encoding="utf-8"):
pass
except OSError:
self._response[R_ERROR] = "Unable to create file"
else:
self._response[R_ADDED] = [self._info(new_file)]
def __rm(self) -> None:
"""Delete files and directories."""
rm_file = rm_list = None
if API_TARGETS in self._request:
rm_list = self._request[API_TARGETS]
if not rm_list:
self._response[R_ERROR] = "Invalid parameters"
return
if not isinstance(rm_list, list):
rm_list = [rm_list]
removed = []
for rm_hash in rm_list:
rm_file = self._find(rm_hash)
if not rm_file:
continue
if self._remove(rm_file):
removed.append(rm_hash)
else:
self._response[R_ERROR] = "Failed to remove: " + rm_file
return
self._response[R_REMOVED] = removed
def __upload(self) -> None:
"""Upload files."""
try: # Windows needs stdio set for binary mode.
import msvcrt # pylint: disable=import-outside-toplevel
# pylint: disable=no-member
# stdin = 0
# stdout = 1
msvcrt.setmode(0, os.O_BINARY) # type: ignore
msvcrt.setmode(1, os.O_BINARY) # type: ignore
except ImportError:
pass
if API_TARGET in self._request:
chunk = self._request.get(API_CHUNK)
self._response[R_ADDED] = []
self._response[R_WARNING] = []
if chunk:
self.__upload_large_file()
else:
self.__upload_small_files()
if len(self._response[R_WARNING]) == 0:
del self._response[R_WARNING]
else:
self._http_status_code = 400
self._response[R_WARNING] = ["Invalid parameters"]
def __upload_large_file(self) -> None:
"""Upload large files by chunks."""
target = self._request.get(API_TARGET)
if not target:
self._response[R_WARNING] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_WARNING] = "Invalid parameters"
return
up_files = self._request.get(API_UPLOAD)
if not up_files:
self._response[R_WARNING] = "No file to upload"
return
chunk = self._request.get(API_CHUNK)
if not chunk:
self._response[R_WARNING] = "No chunk to upload"
return
max_size = self._options["upload_max_size"]
upload_paths = self._request.get(API_UPLOAD_PATH)
if upload_paths:
upload_paths = [self._find_dir(d) for d in upload_paths]
if upload_paths and upload_paths[0]:
cur_dir = upload_paths[0]
if not cur_dir:
self._response[R_WARNING] = "Invalid upload path"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_WARNING] = "Access denied"
return
if chunk.endswith(".part"):
chunk_range = self._request.get(API_RANGE)
if not chunk_range:
self._response[R_WARNING] = "No chunk range"
return
start, clength, total = [int(i) for i in chunk_range.split(",")]
name = ".".join(chunk.split(".")[:-2])
if not self._is_upload_allow(name):
self._set_error_data(name, "Not allowed file type")
elif total > max_size:
self._set_error_data(name, "File exceeds the maximum allowed filesize")
else:
chunk_index, total_chunks = [
int(i) for i in chunk.split(".")[-2].split("_")
]
if not _check_name(name):
self._set_error_data(name, "Invalid name: " + name)
else:
record_path = os.path.join(cur_dir, "." + name + ".txt")
file_path = os.path.join(cur_dir, name + ".parts")
if not os.path.exists(file_path) and os.path.exists(record_path):
os.remove(record_path)
with open(
file_path, "rb+" if os.path.exists(file_path) else "wb+"
) as fil:
fil.seek(start)
data = up_files[0]
written_size = 0
for chunk in self._fbuffer(data.file):
fil.write(chunk)
written_size += len(chunk)
if written_size > clength:
self._set_error_data(name, "Invalid file size")
break
with open(
record_path,
"r+" if os.path.exists(record_path) else "w+",
encoding="utf-8",
) as record_fil:
record_fil.seek(chunk_index)
record_fil.write("X")
record_fil.seek(0)
written = record_fil.read()
if written == ("X" * (total_chunks + 1)):
self._response[R_ADDED] = []
self._response[R_CHUNKMERGED] = name
self._response[R_NAME] = name
else:
self._response[R_ADDED] = []
if R_CHUNKMERGED in self._response:
os.remove(record_path)
else:
name = chunk
file_path = os.path.join(cur_dir, name)
if os.path.exists(file_path + ".parts"):
up_size = os.lstat(file_path + ".parts").st_size
if up_size > max_size:
try:
os.unlink(file_path + ".parts")
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
# TODO ? # pylint: disable=fixme
self._response[R_WARNING].append(
"File was only partially uploaded"
)
else:
if self._is_upload_allow(name):
os.rename(file_path + ".parts", file_path)
os.chmod(file_path, self._options["file_mode"])
self._response[R_ADDED] = [self._info(file_path)]
else:
self._response[R_WARNING].append("Not allowed file type")
try:
os.unlink(file_path + ".parts")
except OSError:
pass
def __upload_small_files(self) -> None:
"""Upload small files."""
target = self._request.get(API_TARGET)
if not target:
self._response[R_WARNING] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_WARNING] = "Invalid parameters"
return
up_files = self._request.get(API_UPLOAD)
if not up_files:
self._response[R_WARNING] = "No file to upload"
return
up_size = 0
max_size = self._options["upload_max_size"]
upload_paths = self._request.get(API_UPLOAD_PATH)
if upload_paths:
upload_paths = [self._find_dir(d) for d in upload_paths]
for idx, data in enumerate(up_files):
name = data.filename.encode("utf-8")
if not name:
continue
name = self._check_utf8(name)
name = os.path.basename(name)
if not upload_paths:
target_dir = cur_dir
else:
target_dir = upload_paths[idx]
if not target_dir:
self._response[R_WARNING].append("Invalid upload path")
elif not _check_name(name):
self._response[R_WARNING].append("Invalid name: " + name)
elif not self._is_allowed(target_dir, "write"):
self._response[R_WARNING] = "Access denied"
else:
name = os.path.join(target_dir, name)
replace = os.path.exists(name)
try:
with open(name, "wb", self._options["upload_write_chunk"]) as fil:
for chunk in self._fbuffer(data.file):
fil.write(chunk)
up_size += os.lstat(name).st_size
if up_size > max_size:
try:
os.unlink(name)
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
self._response[R_WARNING].append(
"File was only partially uploaded"
)
elif not self._is_upload_allow(name):
self._response[R_WARNING].append("Not allowed file type")
try:
os.unlink(name)
except OSError:
pass
else:
os.chmod(name, self._options["file_mode"])
if replace: # update thumbnail
self._rm_tmb(name)
self._response[R_ADDED].append(self._info(name))
except OSError:
self._response[R_WARNING].append("Unable to save uploaded file")
if up_size > max_size:
try:
os.unlink(name)
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
self._response[R_WARNING].append(
"File was only partially uploaded"
)
def __paste(self) -> None:
"""Copy or cut files/directories."""
if API_TARGETS in self._request and API_DST in self._request:
dst = self._find_dir(self._request[API_DST])
cur_dir = dst
if not cur_dir or not dst or API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
files = self._request[API_TARGETS]
if not isinstance(files, list):
files = [files]
cut = False
if API_CUT in self._request:
if self._request[API_CUT] == "1":
cut = True
if not self._is_allowed(dst, "write"):
self._response[R_ERROR] = "Access denied"
return
added = []
removed = []
for fhash in files:
fil = self._find(fhash)
if not fil:
self._response[R_ERROR] = "File not found"
return
new_dst = os.path.join(dst, os.path.basename(fil))
if dst.find(fil) == 0:
self._response[R_ERROR] = "Unable to copy into itself"
return
if cut:
if not self._is_allowed(fil, "rm"):
self._response[R_ERROR] = "Move failed"
self._set_error_data(fil, "Access denied")
return
# TODO thumbs # pylint: disable=fixme
if os.path.exists(new_dst):
self._response[
R_ERROR
] = "File or folder with the same name already exists"
self._set_error_data(
fil, "File or folder with the same name already exists"
)
return
try:
os.rename(fil, new_dst)
self._rm_tmb(fil)
added.append(self._info(new_dst))
removed.append(fhash)
continue
except OSError:
self._response[R_ERROR] = "Unable to move files"
self._set_error_data(fil, "Unable to move")
return
else:
if not self._copy(fil, new_dst):
self._response[R_ERROR] = "Unable to copy files"
return
added.append(self._info(new_dst))
continue
self._response[R_ADDED] = added
self._response[R_REMOVED] = removed
else:
self._response[R_ERROR] = "Invalid parameters"
def __duplicate(self) -> None:
"""Create copy of files/directories."""
targets = self._request.get(API_TARGETS)
if not targets:
self._response[R_ERROR] = "Invalid parameters"
return
added = []
for target in targets:
target = self._find(target)
if not target:
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(target)
if not self._is_allowed(target, "read") or not self._is_allowed(
cur_dir, "write"
):
self._response[R_ERROR] = "Access denied"
return
new_name = _unique_name(target)
if not self._copy(target, new_name):
self._response[R_ERROR] = "Unable to create file copy"
return
added.append(self._info(new_name))
self._response[R_ADDED] = added
def __resize(self) -> None:
"""Scale image size."""
target = self._request.get(API_TARGET)
width = self._request.get(API_WIDTH)
height = self._request.get(API_HEIGHT)
if not (target and width is not None and height is not None):
self._response[R_ERROR] = "Invalid parameters"
return
width = int(width)
height = int(height)
if width < 1 or height < 1:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "write"):
self._response[R_ERROR] = "Access denied"
return
if _mimetype(cur_file).find("image") != 0:
self._response[R_ERROR] = "File is not an image"
return
self._debug("resize " + cur_file, str(width) + ":" + str(height))
if not self._init_img_lib():
return
try:
img = self._img.open(cur_file) # type: ignore
img_resized = img.resize(
(width, height), self._img.ANTIALIAS # type: ignore
)
img_resized.save(cur_file)
self._rm_tmb(cur_file)
except OSError as exc: # UnidentifiedImageError requires Pillow 7.0.0
# self._debug('resizeFailed_' + path, str(exc))
self._debug("resizeFailed_" + self._options["root"], str(exc))
self._response[R_ERROR] = "Unable to resize image"
return
self._response[R_CHANGED] = [self._info(cur_file)]
def __thumbnails(self) -> None:
"""Create previews for images."""
thumbs_dir = self._options["tmb_dir"]
targets = self._request.get(API_TARGETS)
if not targets:
return
if not self._init_img_lib() or not self._can_create_tmb():
return
assert thumbs_dir # typing
if self._options["tmb_at_once"] > 0:
tmb_max = self._options["tmb_at_once"]
else:
tmb_max = 5
self._response[R_IMAGES] = {}
i = 0
for fhash in targets:
path = self._find(fhash)
if path is None:
continue
if os.path.dirname(path) == thumbs_dir:
continue
if self._can_create_tmb(path) and self._is_allowed(path, "read"):
tmb = os.path.join(thumbs_dir, fhash + ".png")
if not os.path.exists(tmb):
if self._tmb(path, tmb):
self._response[R_IMAGES].update({fhash: self._path2url(tmb)})
i += 1
if i >= tmb_max:
break
def __size(self) -> None:
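        """Return total size and file/folder counts for the requested targets."""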
if API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
targets = self._request[API_TARGETS]
all_total_size = 0
all_file_count = 0
all_dir_count = 0
sizes = [] # type: List[Dict[str, int]]
for target in targets:
path = self._find(target)
if path is None:
self._set_error_data(target, "Target not found")
continue
total_size = 0
file_count = 0
dir_count = 0
if os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
size = self._dir_size(folder_path)
sizes.append({})
dir_count += 1
total_size += size
for fil in files:
file_path = os.path.join(root, fil)
size = os.stat(file_path).st_size
total_size += size
file_count += 1
break
else:
                size = os.stat(path).st_size
total_size += size
file_count += 1
sizes.append(
{R_DIR_CNT: dir_count, R_FILE_CNT: file_count, R_SIZE: total_size}
)
all_total_size += total_size
all_file_count += file_count
all_dir_count += dir_count
self._response[R_SIZE] = all_total_size
self._response[R_FILE_CNT] = all_file_count
self._response[R_DIR_CNT] = all_dir_count
self._response[R_SIZES] = sizes
def __ls(self) -> None:
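        """List names (hash -> name) in the target directory, optionally filtered by intersect."""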
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
intersect = self._request.get(API_INTERSECT)
path = self._find(target)
if path is None or not os.path.isdir(path):
self._response[R_ERROR] = "Target directory not found"
return
if os.path.islink(path):
path = self._read_link(path)
if path is None:
self._response[R_ERROR] = "Directory (link) not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
file_names = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
items = {}
for fname in file_names:
fhash = self._hash(os.path.join(path, fname))
if intersect:
if fhash in intersect:
items[fhash] = fname
else:
items[fhash] = fname
self._response[R_LIST] = items
def __tree(self) -> None:
"""Return directory tree starting from path."""
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
path = self._find_dir(target)
if path is None or not os.path.isdir(path):
self._response[R_ERROR] = "Directory not found"
return
if os.path.islink(path):
path = self._read_link(path)
if path is None:
self._response[R_ERROR] = "Directory (link) not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
directories = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
tree = []
for directory in sorted(directories):
dir_path = os.path.join(path, directory)
if (
os.path.isdir(dir_path)
and not os.path.islink(dir_path)
and self._is_accepted(directory)
):
tree.append(self._info(dir_path))
self._response[R_TREE] = tree
def __get(self) -> None:
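        """Return file content as text, or base64-encoded when it is not valid UTF-8."""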
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
with open(cur_file, "r", encoding="utf-8") as text_fil:
self._response[API_CONTENT] = text_fil.read()
except UnicodeDecodeError:
with open(cur_file, "rb") as bin_fil:
self._response[API_CONTENT] = base64.b64encode(bin_fil.read()).decode(
"ascii"
)
def __dim(self) -> None:
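        """Return the dimensions of the target image as 'WIDTHxHEIGHT'."""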
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._response[R_ERROR] = "Access denied"
return
dim = self._get_img_size(cur_file)
if dim:
self._response[R_DIM] = str(dim)
else:
# FIXME This should be an error in the response instead.
self._response[R_DIM] = None
def __put(self) -> None:
"""Save content in file."""
target = self._request.get(API_TARGET)
content = self._request.get(API_CONTENT)
if not target or not content:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "write"):
self._response[R_ERROR] = "Access denied"
return
try:
if (
self._request[API_CONTENT].startswith("data:")
and ";base64," in self._request[API_CONTENT][:100]
):
img_data = self._request[API_CONTENT].split(";base64,")[1]
img_data = base64.b64decode(img_data)
with open(cur_file, "wb") as bin_fil:
bin_fil.write(img_data)
else:
with open(cur_file, "w+", encoding="utf-8") as text_fil:
text_fil.write(self._request[API_CONTENT])
self._rm_tmb(cur_file)
self._response[R_CHANGED] = [self._info(cur_file)]
except OSError:
self._response[R_ERROR] = "Unable to write to file"
def __archive(self) -> None:
"""Compress files/directories to archive."""
# TODO: We don't support "name" field yet.
# "name" is a parameter according to api 2.1.
archive_type = self._request.get(API_TYPE)
target = self._request.get(API_TARGET)
files = self._request.get(API_TARGETS)
if not archive_type or not target or not files:
self._response[R_ERROR] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
if (
archive_type not in self._options["archivers"]["create"]
or archive_type not in self._options["archive_mimes"]
):
self._response[R_ERROR] = "Unable to create archive"
return
real_files = []
for fhash in files:
cur_file = self._find(fhash, cur_dir)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
real_files.append(os.path.basename(cur_file))
arc = self._options["archivers"]["create"][archive_type]
if len(real_files) > 1:
archive_name = "Archive"
else:
archive_name = real_files[0]
archive_name += "." + arc[ARCHIVE_EXT]
archive_name = _unique_name(archive_name, "")
archive_path = os.path.join(cur_dir, archive_name)
cmd = [arc[ARCHIVE_CMD]]
for arg in arc[ARCHIVE_ARGC].split():
cmd.append(arg)
cmd.append(archive_name)
for fil in real_files:
cmd.append(fil)
cur_cwd = os.getcwd()
os.chdir(cur_dir)
ret = _run_sub_process(cmd)
os.chdir(cur_cwd)
if not ret:
self._response[R_ERROR] = "Unable to create archive"
return
self._response[R_ADDED] = [self._info(archive_path)]
def __extract(self) -> None:
"""Extract archive."""
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
makedir = self._request.get(API_MAKEDIR)
cur_file = self._find(target)
if cur_file is None or os.path.isdir(cur_file):
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(cur_file)
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
mime = _mimetype(cur_file)
self._check_archivers()
if mime not in self._options["archivers"]["extract"]:
self._response[R_ERROR] = "Unable to extract files from archive"
return
arc = self._options["archivers"]["extract"][mime]
cmd = [arc[ARCHIVE_CMD]]
for arg in arc[ARCHIVE_ARGC].split():
cmd.append(arg)
cmd.append(os.path.basename(cur_file))
target_dir = cur_dir
added = None
if makedir and makedir != "0":
base_name = os.path.splitext(os.path.basename(cur_file))[0] or "New Folder"
target_dir = os.path.join(target_dir, base_name)
target_dir = _unique_name(target_dir, copy="")
try:
os.mkdir(target_dir, int(self._options["dir_mode"]))
except OSError:
self._response[R_ERROR] = "Unable to create folder: " + base_name
return
cmd += shlex.split(arc["argd"].format(shlex.quote(target_dir)))
added = [self._info(target_dir)]
if added is None:
try:
existing_files = os.listdir(cur_dir)
except PermissionError:
# FIXME: This will likely never happen.
# The find helper will already have failed
# to find the file without parent dir read access.
self._response[R_ERROR] = "Access denied"
return
cur_cwd = os.getcwd()
os.chdir(cur_dir)
ret = _run_sub_process(cmd)
os.chdir(cur_cwd)
if not ret:
self._response[R_ERROR] = "Unable to extract files from archive"
return
if added is None:
added = [
self._info(os.path.join(cur_dir, dname))
for dname in os.listdir(cur_dir)
if dname not in existing_files
]
self._response[R_ADDED] = added
def __ping(self) -> None:
"""Workaround for Safari."""
self._http_status_code = 200
self._http_header["Connection"] = "close"
def __search(self) -> None:
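        """Search files and folders under the target (or root) whose names contain the query."""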
if API_Q not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
if API_TARGET in self._request:
target = self._request[API_TARGET]
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
search_path = self._find_dir(target)
else:
search_path = self._options["root"]
if not search_path:
self._response[R_ERROR] = "File not found"
return
mimes = self._request.get(API_MIMES)
result = []
query = self._request[API_Q]
for root, dirs, files in os.walk(search_path):
for fil in files:
if query.lower() in fil.lower():
file_path = os.path.join(root, fil)
if mimes is None:
result.append(self._info(file_path))
else:
if _mimetype(file_path) in mimes:
result.append(self._info(file_path))
if mimes is None:
for folder in dirs:
file_path = os.path.join(root, folder)
if query.lower() in folder.lower():
result.append(self._info(file_path))
self._response[R_FILES] = result
def _cwd(self, path: str) -> None:
"""Get Current Working Directory."""
name = os.path.basename(path)
if path == self._options["root"]:
name = self._options["root_alias"]
root = True
else:
root = False
if self._options["root_alias"]:
basename = self._options["root_alias"]
else:
basename = os.path.basename(self._options["root"])
rel = os.path.join(basename, path[len(self._options["root"]) :])
info = {
"hash": self._hash(path),
"name": self._check_utf8(name),
"mime": "directory",
"rel": self._check_utf8(rel),
"size": 0,
"date": datetime.fromtimestamp(os.stat(path).st_mtime).strftime(
"%d %b %Y %H:%M"
),
"read": 1,
"write": 1 if self._is_allowed(path, "write") else 0,
"locked": 0,
"rm": not root and self._is_allowed(path, "rm"),
"volumeid": self.volumeid,
}
try:
info["dirs"] = 1 if any(next(os.walk(path))[1]) else 0
except StopIteration:
info["dirs"] = 0
self._response[R_CWD] = info
def _info(self, path: str) -> Info:
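        """Build the elFinder file/directory info dictionary for a path."""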
# mime = ''
filetype = "file"
if os.path.isfile(path):
filetype = "file"
elif os.path.isdir(path):
filetype = "dir"
elif os.path.islink(path):
filetype = "link"
stat = os.lstat(path)
readable = self._is_allowed(path, "read")
writable = self._is_allowed(path, "write")
deletable = self._is_allowed(path, "rm")
info = {
"name": self._check_utf8(os.path.basename(path)),
"hash": self._hash(path),
"mime": "directory" if filetype == "dir" else _mimetype(path),
"read": 1 if readable else 0,
"write": 1 if writable else 0,
"locked": 1 if not readable and not writable and not deletable else 0,
"ts": stat.st_mtime,
} # type: Info
if self._options["expose_real_path"]:
info["path"] = os.path.abspath(path)
if filetype == "dir":
info["volumeid"] = self.volumeid
try:
info["dirs"] = 1 if any(next(os.walk(path))[1]) else 0
except StopIteration:
info["dirs"] = 0
if path != self._options["root"]:
info["phash"] = self._hash(os.path.dirname(path))
if filetype == "link":
lpath = self._read_link(path)
if not lpath:
info["mime"] = "symlink-broken"
return info
if os.path.isdir(lpath):
info["mime"] = "directory"
else:
info["mime"] = _mimetype(lpath)
if self._options["root_alias"]:
basename = self._options["root_alias"]
else:
basename = os.path.basename(self._options["root"])
info["link"] = self._hash(lpath)
info["alias"] = os.path.join(basename, lpath[len(self._options["root"]) :])
info["read"] = 1 if info["read"] and self._is_allowed(lpath, "read") else 0
info["write"] = (
1 if info["write"] and self._is_allowed(lpath, "write") else 0
)
info["locked"] = (
1
if (
not info["write"]
and not info["read"]
and not self._is_allowed(lpath, "rm")
)
else 0
)
info["size"] = 0
else:
lpath = None
info["size"] = self._dir_size(path) if filetype == "dir" else stat.st_size
if info["mime"] != "directory":
if self._options["file_url"] and info["read"]:
if lpath:
info["url"] = self._path2url(lpath)
else:
info["url"] = self._path2url(path)
if info["mime"][0:5] == "image":
thumbs_dir = self._options["tmb_dir"]
if self._can_create_tmb():
assert thumbs_dir # typing
dim = self._get_img_size(path)
if dim:
info["dim"] = dim
# if we are in tmb dir, files are thumbs itself
if os.path.dirname(path) == thumbs_dir:
info["tmb"] = self._path2url(path)
return info
tmb = os.path.join(thumbs_dir, info["hash"] + ".png")
if os.path.exists(tmb):
tmb_url = self._path2url(tmb)
info["tmb"] = tmb_url
else:
if info["mime"].startswith("image/"):
info["tmb"] = "1"
if info["mime"] == "application/x-empty" or info["mime"] == "inode/x-empty":
info["mime"] = "text/plain"
return info
def _remove(self, target: str) -> bool:
"""Provide internal remove procedure."""
if not self._is_allowed(target, "rm"):
self._set_error_data(target, "Access denied")
if not os.path.isdir(target):
try:
os.unlink(target)
self._rm_tmb(target)
return True
except OSError:
self._set_error_data(target, "Remove failed")
return False
else:
try:
targets = os.listdir(target)
except PermissionError:
self._set_error_data(target, "Access denied")
return False
for fil in targets:
if self._is_accepted(fil):
self._remove(os.path.join(target, fil))
try:
os.rmdir(target)
return True
except OSError:
self._set_error_data(target, "Remove failed")
return False
def _copy(self, src: str, dst: str) -> bool:
"""Provide internal copy procedure."""
dst_dir = os.path.dirname(dst)
if not (self._is_allowed(src, "read") and self._is_allowed(dst_dir, "write")):
self._set_error_data(src, "Access denied")
return False
if os.path.exists(dst):
self._set_error_data(
dst, "File or folder with the same name already exists"
)
return False
if not os.path.isdir(src):
try:
shutil.copyfile(src, dst)
shutil.copymode(src, dst)
return True
except (shutil.SameFileError, OSError):
self._set_error_data(src, "Unable to copy files")
return False
else:
try:
os.mkdir(dst, int(self._options["dir_mode"]))
shutil.copymode(src, dst)
except (shutil.SameFileError, OSError):
self._set_error_data(src, "Unable to copy files")
return False
try:
srcs = os.listdir(src)
except PermissionError:
self._set_error_data(src, "Access denied")
return False
for i in srcs:
new_src = os.path.join(src, i)
new_dst = os.path.join(dst, i)
if not self._copy(new_src, new_dst):
self._set_error_data(new_src, "Unable to copy files")
return False
return True
def _find_dir(self, fhash: str, path: Optional[str] = None) -> Optional[str]:
"""Find directory by hash."""
fhash = str(fhash)
# try to get find it in the cache
cached_path = self._cached_path.get(fhash)
if cached_path:
return cached_path
if not path:
path = self._options["root"]
if fhash == self._hash(path):
return path
if not os.path.isdir(path):
return None
for root, dirs, _ in os.walk(path, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
if not os.path.islink(folder_path) and fhash == self._hash(folder_path):
return folder_path
return None
def _find(self, fhash: str, parent: Optional[str] = None) -> Optional[str]:
"""Find file/dir by hash."""
fhash = str(fhash)
cached_path = self._cached_path.get(fhash)
if cached_path:
return cached_path
if not parent:
parent = self._options["root"]
if os.path.isdir(parent):
for root, dirs, files in os.walk(parent, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
if fhash == self._hash(folder_path):
return folder_path
for fil in files:
file_path = os.path.join(root, fil)
if fhash == self._hash(file_path):
return file_path
return None
def _tmb(self, path: str, tmb_path: str) -> bool:
"""Provide internal thumbnail create procedure."""
try:
img = self._img.open(path).copy() # type: ignore
size = self._options["tmb_size"], self._options["tmb_size"]
box = _crop_tuple(img.size)
if box:
img = img.crop(box)
img.thumbnail(size, self._img.ANTIALIAS) # type: ignore
img.save(tmb_path, "PNG")
# UnidentifiedImageError requires Pillow 7.0.0
except (OSError, ValueError) as exc:
self._debug("tmbFailed_" + path, str(exc))
return False
return True
def _rm_tmb(self, path: str) -> None:
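        """Delete the cached thumbnail for path, if one exists."""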
tmb = self._tmb_path(path)
if tmb:
if os.path.exists(tmb):
try:
os.unlink(tmb)
except OSError:
pass
def _read_link(self, path: str) -> Optional[str]:
"""Read link and return real path if not broken."""
target = os.readlink(path)
if not target[0] == "/":
target = os.path.join(os.path.dirname(path), target)
target = os.path.normpath(target)
if os.path.exists(target):
if not target.find(self._options["root"]) == -1:
return target
return None
def _dir_size(self, path: str) -> int:
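        """Return the directory size (recursive when the dir_size option is enabled)."""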
total_size = 0
if self._options["dir_size"]:
for dirpath, _, filenames in os.walk(path):
for fil in filenames:
file_path = os.path.join(dirpath, fil)
if os.path.exists(file_path):
total_size += os.stat(file_path).st_size
else:
total_size = os.lstat(path).st_size
return total_size
def _fbuffer(
self, fil: BinaryIO, chunk_size: int = _options["upload_write_chunk"]
) -> Generator[bytes, None, None]:
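        """Read fil and yield it in chunks of chunk_size bytes."""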
while True:
chunk = fil.read(chunk_size)
if not chunk:
break
yield chunk
def _can_create_tmb(self, path: Optional[str] = None) -> bool:
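        """Return True when thumbnails can be generated (and path, if given, is an image)."""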
if self._options["img_lib"] and self._options["tmb_dir"]:
if path is not None:
mime = _mimetype(path)
if mime[0:5] != "image":
return False
return True
return False
def _tmb_path(self, path: str) -> Optional[str]:
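        """Return the thumbnail path for path, or None if disabled or already a thumbnail."""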
tmb = None
thumbs_dir = self._options["tmb_dir"]
if thumbs_dir:
if not os.path.dirname(path) == thumbs_dir:
tmb = os.path.join(thumbs_dir, self._hash(path) + ".png")
return tmb
def _is_upload_allow(self, name: str) -> bool:
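        """Apply the upload_allow/upload_deny mimetype rules in the configured order."""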
allow = False
deny = False
mime = _mimetype(name)
if "all" in self._options["upload_allow"]:
allow = True
else:
for opt in self._options["upload_allow"]:
if mime.find(opt) == 0:
allow = True
if "all" in self._options["upload_deny"]:
deny = True
else:
for opt in self._options["upload_deny"]:
if mime.find(opt) == 0:
deny = True
if self._options["upload_order"][0] == "allow": # ,deny
if deny is True:
return False
return bool(allow)
# deny,allow
if allow is True:
return True
if deny is True:
return False
return True
def _is_accepted(self, target: str) -> bool:
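        """Reject '.', '..' and hidden names unless dot_files is enabled."""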
if target in (".", ".."):
return False
if target[0:1] == "." and not self._options["dot_files"]:
return False
return True
def _is_allowed(self, path: str, access: str) -> bool:
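        """Check OS-level access and the configured permission rules for path."""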
if not os.path.exists(path):
return False
if access == "read":
if not os.access(path, os.R_OK):
self._set_error_data(path, access)
return False
elif access == "write":
if not os.access(path, os.W_OK):
self._set_error_data(path, access)
return False
elif access == "rm":
if not os.access(os.path.dirname(path), os.W_OK):
self._set_error_data(path, access)
return False
else:
return False
path = path[len(os.path.normpath(self._options["root"])) :]
for ppath, permissions in self._options["perms"].items():
regex = r"" + ppath
if re.search(regex, path) and access in permissions:
return permissions[access]
return self._options["defaults"][access]
def _hash(self, path: str) -> str:
"""Hash of the path."""
hash_code = make_hash(path)
# TODO: what if the cache getting to big? # pylint: disable=fixme
self._cached_path[hash_code] = path
return hash_code
def _path2url(self, path: str) -> str:
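        """Map a path under root to its public URL (base_url + files_url + relative path)."""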
cur_dir = path
length = len(self._options["root"])
url = multi_urljoin(
self._options["base_url"],
self._options["files_url"],
cur_dir[length:],
)
url = self._check_utf8(url).replace(os.sep, "/")
url = quote(url, safe="/")
return url
def _set_error_data(self, path: str, msg: str) -> None:
"""Collect error/warning messages."""
self._error_data[path] = msg
def _init_img_lib(self) -> Optional[str]:
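        """Import the image library (PIL) if configured and return the library name."""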
if not self._options["img_lib"] or self._options["img_lib"] == "auto":
self._options["img_lib"] = "PIL"
if self._options["img_lib"] == "PIL":
try:
from PIL import Image # pylint: disable=import-outside-toplevel
self._img = Image
except ImportError:
self._img = None
self._options["img_lib"] = None
else:
raise NotImplementedError
self._debug("img_lib", self._options["img_lib"])
return self._options["img_lib"]
def _get_img_size(self, path: str) -> Optional[str]:
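        """Return the image size of path as 'WIDTHxHEIGHT', or None if it cannot be read."""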
if not self._init_img_lib():
return None
if self._can_create_tmb():
try:
img = self._img.open(path) # type: ignore
return str(img.size[0]) + "x" + str(img.size[1])
except OSError: # UnidentifiedImageError requires Pillow 7.0.0
print("WARNING: unidentified image or file not found: " + path)
return None
def _debug(self, key: str, val: Any) -> None:
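        """Record a key/value pair in the debug section of the response when debugging is on."""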
if self._options["debug"]:
self._response[R_DEBUG].update({key: val})
def _check_archivers(self) -> None:
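        """Probe for archiver binaries (tar, zip, 7z, ...) and build the create/extract maps."""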
# import subprocess
# proc = subprocess.Popen(['tar', '--version'], shell = False,
# stdout = subprocess.PIPE, stderr=subprocess.PIPE)
# out, err = proc.communicate()
# print 'out:', out, '\nerr:', err, '\n'
archive = {"create": {}, "extract": {}} # type: Archivers
if (
"archive" in self._options["disabled"]
and "extract" in self._options["disabled"]
):
self._options["archive_mimes"] = []
self._options["archivers"] = archive
return
tar = _run_sub_process(["tar", "--version"])
gzip = _run_sub_process(["gzip", "--version"])
bzip2 = _run_sub_process(["bzip2", "--version"])
zipc = _run_sub_process(["zip", "--version"])
unzip = _run_sub_process(["unzip", "--help"])
rar = _run_sub_process(["rar", "--version"], valid_return=[0, 7])
unrar = _run_sub_process(["unrar"], valid_return=[0, 7])
p7z = _run_sub_process(["7z", "--help"])
p7za = _run_sub_process(["7za", "--help"])
p7zr = _run_sub_process(["7zr", "--help"])
# tar = False
# tar = gzip = bzip2 = zipc = unzip = rar = unrar = False
# print tar, gzip, bzip2, zipc, unzip, rar, unrar, p7z, p7za, p7zr
create = archive["create"]
extract = archive["extract"]
if tar:
mime = "application/x-tar"
create.update(
{mime: {ARCHIVE_CMD: "tar", ARCHIVE_ARGC: "-cf", ARCHIVE_EXT: "tar"}}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xf",
ARCHIVE_EXT: "tar",
"argd": "-C {}",
}
}
)
if tar and gzip:
mime = "application/x-gzip"
create.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-czf",
ARCHIVE_EXT: "tar.gz",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xzf",
ARCHIVE_EXT: "tar.gz",
"argd": "-C {}",
}
}
)
if tar and bzip2:
mime = "application/x-bzip2"
create.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-cjf",
ARCHIVE_EXT: "tar.bz2",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xjf",
ARCHIVE_EXT: "tar.bz2",
"argd": "-C {}",
}
}
)
mime = "application/zip"
if zipc:
create.update(
{mime: {ARCHIVE_CMD: "zip", ARCHIVE_ARGC: "-r9", ARCHIVE_EXT: "zip"}}
)
if unzip:
extract.update(
{
mime: {
ARCHIVE_CMD: "unzip",
ARCHIVE_ARGC: "",
ARCHIVE_EXT: "zip",
"argd": "-d {}",
}
}
)
mime = "application/x-rar"
if rar:
create.update(
{
mime: {
ARCHIVE_CMD: "rar",
ARCHIVE_ARGC: "a -inul",
ARCHIVE_EXT: "rar",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "rar",
ARCHIVE_ARGC: "x -y",
ARCHIVE_EXT: "rar",
"argd": "{}",
}
}
)
elif unrar:
extract.update(
{
mime: {
ARCHIVE_CMD: "unrar",
ARCHIVE_ARGC: "x -y",
ARCHIVE_EXT: "rar",
"argd": "{}",
}
}
)
p7zip = None
if p7z:
p7zip = "7z"
elif p7za:
p7zip = "7za"
elif p7zr:
p7zip = "7zr"
if p7zip:
mime = "application/x-7z-compressed"
create.update(
{mime: {ARCHIVE_CMD: p7zip, ARCHIVE_ARGC: "a -t7z", ARCHIVE_EXT: "7z"}}
)
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "7z",
"argd": "-o{}",
}
}
)
mime = "application/x-tar"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -ttar",
ARCHIVE_EXT: "tar",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar",
"argd": "-o{}",
}
}
)
mime = "application/x-gzip"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tgzip",
ARCHIVE_EXT: "gz",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar.gz",
"argd": "-o{}",
}
}
)
mime = "application/x-bzip2"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tbzip2",
ARCHIVE_EXT: "bz2",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar.bz2",
"argd": "-o{}",
}
}
)
mime = "application/zip"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tzip",
ARCHIVE_EXT: "zip",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "zip",
"argd": "-o{}",
}
}
)
if not self._options["archive_mimes"]:
self._options["archive_mimes"] = list(create.keys())
else:
pass
self._options["archivers"] = archive
def _check_utf8(self, name: Union[str, bytes]) -> str:
if isinstance(name, str):
return name
try:
str_name = name.decode("utf-8")
except UnicodeDecodeError:
str_name = str(name, "utf-8", "replace")
self._debug("invalid encoding", str_name)
return str_name
def _check_name(filename: str) -> bool:
"""Check for valid file name."""
if sanitize_filename(filename) != filename:
return False
return True
def _check_dir(filepath: str) -> bool:
"""Check for valid dir name."""
if sanitize_filepath(filepath) != filepath:
return False
return True
def _mimetype(path: str) -> str:
"""Detect mimetype of file."""
mime = mimetypes.guess_type(path)[0] or "unknown"
_, ext = os.path.splitext(path)
if mime == "unknown" and ext in mimetypes.types_map:
mime = mimetypes.types_map[ext]
if mime == "text/plain" and ext == ".pl":
mime = MIME_TYPES[ext]
if mime == "application/vnd.ms-office" and ext == ".doc":
mime = MIME_TYPES[ext]
if mime == "unknown":
if os.path.basename(path) in ["README", "ChangeLog", "LICENSE", "Makefile"]:
mime = "text/plain"
else:
mime = MIME_TYPES.get(ext, mime)
return mime
def _unique_name(path: str, copy: str = " copy") -> str:
"""Generate unique name for file copied file."""
cur_dir = os.path.dirname(path)
cur_name = os.path.basename(path)
last_dot = cur_name.rfind(".")
ext = new_name = ""
if not os.path.isdir(path) and re.search(r"\..{3}\.(gz|bz|bz2)$", cur_name):
pos = -7
if cur_name[-1:] == "2":
pos -= 1
ext = cur_name[pos:]
old_name = cur_name[0:pos]
new_name = old_name + copy
elif os.path.isdir(path) or last_dot <= 0:
old_name = cur_name
new_name = old_name + copy
else:
ext = cur_name[last_dot:]
old_name = cur_name[0:last_dot]
new_name = old_name + copy
pos = 0
if old_name[-len(copy) :] == copy:
new_name = old_name
elif re.search(r"" + copy + r"\s\d+$", old_name):
pos = old_name.rfind(copy) + len(copy)
new_name = old_name[0:pos]
else:
new_path = os.path.join(cur_dir, new_name + ext)
if not os.path.exists(new_path):
return new_path
# if we are here then copy already exists or making copy of copy
# we will make new indexed copy *black magic*
idx = 1
if pos > 0:
idx = int(old_name[pos:])
while True:
idx += 1
new_name_ext = new_name + " " + str(idx) + ext
new_path = os.path.join(cur_dir, new_name_ext)
if not os.path.exists(new_path):
return new_path
# if idx >= 1000: break # possible loop
def _run_sub_process(cmd: List[str], valid_return: Optional[List[int]] = None) -> bool:
if valid_return is None:
valid_return = [0]
try:
completed = subprocess.run(
cmd, input=b"", check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except (subprocess.SubprocessError, OSError):
return False
if completed.returncode not in valid_return:
print(str(completed.stderr))
return False
return True
def _crop_tuple(size: Tuple[int, int]) -> Optional[Tuple[int, int, int, int]]:
"""Return the crop rectangle, as a (left, upper, right, lower)-tuple."""
width, height = size
if width > height: # landscape
left = int((width - height) / 2)
upper = 0
right = left + height
lower = height
return (left, upper, right, lower)
if height > width: # portrait
left = 0
upper = int((height - width) / 2)
right = width
lower = upper + width
return (left, upper, right, lower)
# cube
return None
def make_hash(to_hash: str) -> str:
"""Return a hash of to_hash."""
hash_obj = hashlib.md5()
hash_obj.update(to_hash.encode("utf-8"))
hash_code = str(hash_obj.hexdigest())
return hash_code
def multi_urljoin(*parts: str) -> str:
"""Join multiple url parts into a valid url."""
if parts[0].startswith("http"):
return str(
urljoin(
parts[0],
"/".join(part.strip("/") for part in parts[1:]),
)
)
return "/" + "/".join(part.strip("/") for part in parts if part)
| 33.898087 | 88 | 0.504473 |
import base64
import hashlib
import mimetypes
import os
import re
import shlex
import shutil
import subprocess
import time
import traceback
import uuid
from datetime import datetime
from types import ModuleType
from typing import Any, BinaryIO, Dict, Generator, List, Optional, Tuple, Union
from urllib.parse import quote, urljoin
from pathvalidate import sanitize_filename, sanitize_filepath
from typing_extensions import Literal, TypedDict
from .api_const import (
API_CMD,
API_CONTENT,
API_CURRENT,
API_CUT,
API_CHUNK,
API_CID,
API_DIRS,
API_DOWNLOAD,
API_DST,
API_HEIGHT,
API_INIT,
API_INTERSECT,
API_MAKEDIR,
API_MIMES,
API_NAME,
API_Q,
API_SRC,
API_TARGET,
API_TARGETS,
API_TREE,
API_TYPE,
API_UPLOAD,
API_UPLOAD_PATH,
API_WIDTH,
API_RANGE,
ARCHIVE_ARGC,
ARCHIVE_CMD,
ARCHIVE_EXT,
R_ADDED,
R_API,
R_CHANGED,
R_CHUNKMERGED,
R_CWD,
R_DEBUG,
R_DIM,
R_DIR_CNT,
R_ERROR,
R_FILE_CNT,
R_FILES,
R_HASHES,
R_IMAGES,
R_LIST,
R_NAME,
R_NETDRIVERS,
R_OPTIONS,
R_OPTIONS_ARCHIVERS,
R_OPTIONS_COPY_OVERWRITE,
R_OPTIONS_CREATE,
R_OPTIONS_CREATE_EXT,
R_OPTIONS_DISABLED,
R_OPTIONS_DISP_INLINE_REGEX,
R_OPTIONS_EXTRACT,
R_OPTIONS_I18N_FOLDER_NAME,
R_OPTIONS_JPG_QUALITY,
R_OPTIONS_MIME_ALLOW,
R_OPTIONS_MIME_DENY,
R_OPTIONS_MIME_FIRST_ORDER,
R_OPTIONS_PATH,
R_OPTIONS_SEPARATOR,
R_OPTIONS_SYNC_CHK_AS_TS,
R_OPTIONS_SYNC_MIN_MS,
R_OPTIONS_TMB_URL,
R_OPTIONS_UI_CMD_MAP,
R_OPTIONS_UPLOAD_MAX_CONN,
R_OPTIONS_UPLOAD_MAX_SIZE,
R_OPTIONS_UPLOAD_MIME,
R_OPTIONS_UPLOAD_OVERWRITE,
R_OPTIONS_URL,
R_REMOVED,
R_SIZE,
R_SIZES,
R_TREE,
R_UPLMAXFILE,
R_UPLMAXSIZE,
R_WARNING,
)
COMMANDS = {
"archive": "__archive",
"chmod": "__chmod",
"dim": "__dim",
"duplicate": "__duplicate",
"extract": "__extract",
"file": "__file",
"get": "__get",
"info": "__places",
"ls": "__ls",
"mkdir": "__mkdir",
"mkfile": "__mkfile",
"netmount": "__netmount",
"open": "__open",
"parents": "__parents",
"paste": "__paste",
"ping": "__ping",
"put": "__put",
"reload": "__reload", "rename": "__rename",
"resize": "__resize",
"rm": "__rm",
"search": "__search",
"size": "__size",
"tmb": "__thumbnails",
"tree": "__tree",
"upload": "__upload",
"zipdl": "__zipdl",
}
MIME_TYPES = {
".cfg": "text/plain",
".conf": "text/plain",
".css": "text/css",
".htm": "text/html",
".html": "text/html",
".ini": "text/plain",
".java": "text/x-java-source",
".js": "text/javascript",
".md": "text/markdown",
".php": "text/x-php",
".pl": "text/x-perl",
".py": "text/x-python",
".rb": "text/x-ruby",
".rtf": "text/rtf",
".rtfd": "text/rtfd",
".sh": "text/x-shellscript",
".sql": "text/x-sql",
".txt": "text/plain",
".7z": "application/x-7z-compressed",
".doc": "application/msword",
".ogg": "application/ogg",
".mkv": "video/x-matroska",
".ogm": "application/ogm",
}
Archivers = TypedDict(
    "Archivers",
{"create": Dict[str, Dict[str, str]], "extract": Dict[str, Dict[str, str]]},
)
Info = TypedDict(
    "Info",
{
"alias": str,
"dim": str,
"dirs": int,
"hash": str,
"link": str,
"locked": int,
"mime": str,
"name": str,
"path": str,
"phash": str,
"read": int,
"size": int,
"tmb": str,
"ts": float,
"url": str,
"volumeid": str,
"write": int,
},
total=False,
)
Options = TypedDict(
    "Options",
{
"archive_mimes": List[str],
"archivers": Archivers,
"base_url": str,
"debug": bool,
"defaults": Dict[str, bool],
"dir_mode": Literal[493],
"dir_size": bool,
"disabled": List[str],
"dot_files": bool,
"expose_real_path": bool,
"file_mode": Literal[420],
"file_url": bool,
"files_url": str,
"img_lib": Optional[str],
"max_folder_depth": int,
"perms": Dict[str, Dict[str, bool]],
"root_alias": str,
"root": str,
"tmb_at_once": int,
"tmb_dir": Optional[str],
"tmb_size": int,
"upload_allow": List[str],
"upload_deny": List[str],
"upload_max_conn": int,
"upload_max_size": int,
"upload_order": List[Literal["deny", "allow"]],
"upload_write_chunk": int,
},
)
def exception_to_string(excp: Exception) -> str:
stack = traceback.extract_stack()[:-3] + traceback.extract_tb(
excp.__traceback__
    )
    pretty = traceback.format_list(stack)
return "".join(pretty) + f"\n {excp.__class__} {excp}"
class Connector:
_options = {
"archive_mimes": [],
"archivers": {"create": {}, "extract": {}},
"base_url": "",
"debug": False,
"defaults": {"read": True, "write": True, "rm": True},
"dir_mode": 0o755,
"dir_size": False,
"disabled": ["netmount", "zipdl"],
"dot_files": False,
"expose_real_path": False,
"file_mode": 0o644,
"file_url": True,
"files_url": "",
"img_lib": "auto",
"max_folder_depth": 256,
"perms": {},
"root_alias": "HOME",
"root": "",
"tmb_at_once": 5,
"tmb_dir": ".tmb",
"tmb_size": 48,
"upload_allow": [],
"upload_deny": [],
"upload_max_conn": -1,
"upload_max_size": 256 * 1024 * 1024,
"upload_order": ["deny", "allow"],
"upload_write_chunk": 8192,
}
_cached_path = {}
http_allowed_parameters = (
API_CHUNK,
API_CID,
API_CMD,
API_CONTENT,
API_CURRENT,
API_CUT,
API_DIRS,
API_DOWNLOAD,
API_DST,
API_HEIGHT,
API_INIT,
API_MAKEDIR,
API_NAME,
API_Q,
API_RANGE,
API_SRC,
API_TARGET,
API_TARGETS,
API_TREE,
API_TYPE,
API_UPLOAD,
API_UPLOAD_PATH,
API_WIDTH,
)
def __init__(
self,
root: str,
url: str,
base_url: str,
upload_max_size: int,
tmb_dir: Optional[str],
expose_real_path: bool = False,
dot_files: bool = False,
debug: bool = False,
) -> None:
self.volumeid = str(uuid.uuid4())
self._commands = dict(COMMANDS)
        self._http_header = {}
        self._http_status_code = 0
        self._request = {}
        self._response = {}
        self._response[R_DEBUG] = {}
        self._error_data = {}
        self._img = None
self._options["root"] = self._check_utf8(root)
self._options["upload_max_size"] = upload_max_size
self._options["debug"] = debug
self._options["base_url"] = (
base_url.lstrip("/") if base_url.startswith("//") else base_url
)
self._options["expose_real_path"] = expose_real_path
self._options["dot_files"] = dot_files
self._options["files_url"] = self._check_utf8(url).rstrip("/")
self._debug("files_url", self._options["files_url"])
self._debug("root", self._options["root"])
for cmd in self._options["disabled"]:
if cmd in self._commands:
del self._commands[cmd]
if tmb_dir:
thumbs_dir = os.path.join(self._options["root"], tmb_dir)
try:
if not os.path.exists(thumbs_dir):
                    os.makedirs(thumbs_dir)
                self._options["tmb_dir"] = thumbs_dir
except PermissionError:
self._options["tmb_dir"] = None
self._debug("thumbnail", " Permission denied: " + thumbs_dir)
print(
"WARNING: failed to create thumbnail folder "
"due to permission denied, it will be disabled."
)
def run(
self, http_request: Dict[str, Any]
) -> Tuple[int, Dict[str, str], Dict[str, Any]]:
start_time = time.time()
root_ok = True
if not os.path.exists(self._options["root"]):
root_ok = False
self._response[R_ERROR] = "Invalid backend configuration"
elif not self._is_allowed(self._options["root"], "read"):
root_ok = False
self._response[R_ERROR] = "Access denied"
for field in self.http_allowed_parameters:
if field in http_request:
self._request[field] = http_request[field]
if root_ok and API_CMD in self._request:
if self._request[API_CMD] in self._commands:
cmd = self._commands[self._request[API_CMD]]
func = getattr(self, "_" + self.__class__.__name__ + cmd)
try:
func()
                except Exception as exc:
                    self._response[
R_ERROR
] = f"Command Failed: {self._request[API_CMD]}, Error: \n{exc}"
traceback.print_exc()
self._debug("exception", exception_to_string(exc))
else:
self._response[R_ERROR] = f"Unknown command: {self._request[API_CMD]}"
if self._error_data:
self._debug("errorData", self._error_data)
if self._options["debug"]:
self._debug("time", (time.time() - start_time))
else:
self._response.pop(R_DEBUG, None)
if self._http_status_code < 100:
self._http_status_code = 200
if "Content-type" not in self._http_header:
if API_CMD in self._request and self._request[API_CMD] == "upload":
self._http_header["Content-type"] = "text/html"
else:
self._http_header["Content-type"] = "application/json"
return self._http_status_code, self._http_header, self._response
def __places(self) -> None:
if API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
targets = self._request[API_TARGETS]
files = []
for target in targets:
path = self._find(target)
if path is None:
self._set_error_data(target, "File not found")
else:
files.append(self._info(path))
self._response[R_FILES] = files
def __open(self) -> None:
path = None
init = self._request.get(API_INIT)
target = self._request.get(API_TARGET)
if not init and not target:
self._response[R_ERROR] = "Invalid parameters"
return
if target:
path = self._find_dir(target)
if init:
self._response[R_API] = 2.1
if not path:
path = self._options["root"]
if not path:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
self._cwd(path)
try:
items = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
files = []
for item in sorted(items):
file_path = os.path.join(path, item)
if self._is_accepted(item):
info = self._info(file_path)
files.append(info)
self._response[R_FILES] = files
if self._request.get(API_TREE):
self._response[R_FILES].append(self._info(path))
self._check_archivers()
if not self._options["file_url"]:
url = ""
else:
url = self._options["files_url"]
self._response[R_NETDRIVERS] = []
self._response[R_UPLMAXFILE] = 1000
self._response[R_UPLMAXSIZE] = (
str(self._options["upload_max_size"] / (1024 * 1024)) + "M"
)
thumbs_dir = self._options["tmb_dir"]
if thumbs_dir:
thumbs_url = self._path2url(thumbs_dir)
else:
thumbs_url = ""
self._response[R_OPTIONS] = {
R_OPTIONS_PATH: path,
R_OPTIONS_SEPARATOR: os.path.sep,
R_OPTIONS_URL: url,
R_OPTIONS_DISABLED: self._options["disabled"],
R_OPTIONS_TMB_URL: thumbs_url,
R_OPTIONS_ARCHIVERS: {
R_OPTIONS_CREATE: list(self._options["archivers"]["create"].keys()),
R_OPTIONS_EXTRACT: list(self._options["archivers"]["extract"].keys()),
R_OPTIONS_CREATE_EXT: {
k: self._options["archivers"]["create"][k][ARCHIVE_EXT]
for k in self._options["archivers"]["create"]
},
},
R_OPTIONS_COPY_OVERWRITE: True,
R_OPTIONS_UPLOAD_MAX_SIZE: self._options["upload_max_size"],
R_OPTIONS_UPLOAD_OVERWRITE: True,
R_OPTIONS_UPLOAD_MAX_CONN: 3,
R_OPTIONS_UPLOAD_MIME: {
R_OPTIONS_MIME_ALLOW: ["all"],
R_OPTIONS_MIME_DENY: [],
R_OPTIONS_MIME_FIRST_ORDER: R_OPTIONS_MIME_DENY,
},
R_OPTIONS_I18N_FOLDER_NAME: True,
R_OPTIONS_DISP_INLINE_REGEX: "^(?:(?:image|video|audio)|application/"
+ "(?:x-mpegURL|dash\\+xml)|(?:text/plain|application/pdf)$)",
R_OPTIONS_JPG_QUALITY: 100,
R_OPTIONS_SYNC_CHK_AS_TS: 1,
R_OPTIONS_SYNC_MIN_MS: 30000,
R_OPTIONS_UI_CMD_MAP: {},
}
def __parents(self) -> None:
self._response[R_TREE] = []
def __chmod(self) -> None:
self._response[R_CHANGED] = []
def __netmount(self) -> None:
pass
def __zipdl(self) -> None:
pass
def __file(self) -> None:
self._http_header["Content-type"] = "text/html"
target = self._request.get(API_TARGET)
if not target:
self._response["__text"] = "Invalid parameters"
return
download = self._request.get(API_DOWNLOAD)
cur_file = self._find(target)
if not cur_file or not os.path.exists(cur_file) or os.path.isdir(cur_file):
self._http_status_code = 404
self._response["__text"] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._http_status_code = 403
self._response["__text"] = "Access denied"
return
if os.path.islink(cur_file):
cur_file = self._read_link(cur_file)
if (
not cur_file
or not self._is_allowed(os.path.dirname(cur_file), "read")
or not self._is_allowed(cur_file, "read")
):
self._http_status_code = 403
self._response["__text"] = "Access denied"
return
mime = _mimetype(cur_file)
parts = mime.split("/", 2)
if download:
disp = "attachments"
elif parts[0] == "image":
disp = "image"
else:
disp = "inline"
self._http_status_code = 200
self._http_header["Content-type"] = mime
self._http_header["Content-Length"] = str(os.lstat(cur_file).st_size)
self._http_header["Content-Disposition"] = disp + ";"
self._response["__send_file"] = cur_file
def __rename(self) -> None:
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
if not (name and target):
self._response[R_ERROR] = "Invalid parameters"
return
cur_name = self._find(target)
if not cur_name:
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(cur_name)
        if not (
            self._is_allowed(cur_dir, "write") and self._is_allowed(cur_name, "rm")
        ):
self._response[R_ERROR] = "Access denied"
return
name = self._check_utf8(name)
if not name or not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_name = os.path.join(cur_dir, name)
if os.path.exists(new_name):
self._response[R_ERROR] = (
"File or folder with the same name " + new_name + " already exists"
)
return
self._rm_tmb(cur_name)
try:
os.rename(cur_name, new_name)
self._response[R_ADDED] = [self._info(new_name)]
self._response[R_REMOVED] = [target]
except OSError:
self._response[R_ERROR] = "Unable to rename file"
def __mkdir(self) -> None:
path = None
new_dir = None
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
dirs = self._request.get(API_DIRS)
if not target or (not name and not dirs):
self._response[R_ERROR] = "Invalid parameters"
return
path = self._find_dir(target)
if not path:
self._response[R_ERROR] = "Invalid parameters"
return
if not self._is_allowed(path, "write"):
self._response[R_ERROR] = "Access denied"
return
if name:
name = self._check_utf8(name)
if not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_dir = os.path.join(path, name)
if os.path.exists(new_dir):
self._response[R_ERROR] = (
"File or folder with the same name " + name + " already exists"
)
else:
try:
os.mkdir(new_dir, int(self._options["dir_mode"]))
self._response[R_ADDED] = [self._info(new_dir)]
self._response[R_HASHES] = {}
except OSError:
self._response[R_ERROR] = "Unable to create folder"
if dirs:
self._response[R_ADDED] = []
self._response[R_HASHES] = {}
for sdir in dirs:
subdir = sdir.lstrip("/")
if not _check_dir(subdir):
self._response[R_ERROR] = "Invalid dir name: " + subdir
return
new_subdir = os.path.join(path, subdir)
if os.path.exists(new_subdir):
self._response[R_ERROR] = (
"File or folder with the same name "
+ subdir
+ " already exists"
)
return
try:
os.mkdir(new_subdir, int(self._options["dir_mode"]))
self._response[R_ADDED].append(self._info(new_subdir))
self._response[R_HASHES][sdir] = self._hash(new_subdir)
except OSError:
self._response[R_ERROR] = "Unable to create folder"
return
def __mkfile(self) -> None:
name = self._request.get(API_NAME)
target = self._request.get(API_TARGET)
if not target or not name:
self._response[R_ERROR] = "Invalid parameters"
return
name = self._check_utf8(name)
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_ERROR] = "Invalid parameters"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
if not _check_name(name):
self._response[R_ERROR] = "Invalid name"
return
new_file = os.path.join(cur_dir, name)
if os.path.exists(new_file):
self._response[R_ERROR] = "File or folder with the same name already exists"
else:
try:
with open(new_file, "w", encoding="utf-8"):
pass
except OSError:
self._response[R_ERROR] = "Unable to create file"
else:
self._response[R_ADDED] = [self._info(new_file)]
def __rm(self) -> None:
rm_file = rm_list = None
if API_TARGETS in self._request:
rm_list = self._request[API_TARGETS]
if not rm_list:
self._response[R_ERROR] = "Invalid parameters"
return
if not isinstance(rm_list, list):
rm_list = [rm_list]
removed = []
for rm_hash in rm_list:
rm_file = self._find(rm_hash)
if not rm_file:
continue
if self._remove(rm_file):
removed.append(rm_hash)
else:
self._response[R_ERROR] = "Failed to remove: " + rm_file
return
self._response[R_REMOVED] = removed
def __upload(self) -> None:
        try:
            import msvcrt
            msvcrt.setmode(0, os.O_BINARY)
            msvcrt.setmode(1, os.O_BINARY)
        except ImportError:
            pass
if API_TARGET in self._request:
chunk = self._request.get(API_CHUNK)
self._response[R_ADDED] = []
self._response[R_WARNING] = []
if chunk:
self.__upload_large_file()
else:
self.__upload_small_files()
if len(self._response[R_WARNING]) == 0:
del self._response[R_WARNING]
else:
self._http_status_code = 400
self._response[R_WARNING] = ["Invalid parameters"]
def __upload_large_file(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_WARNING] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_WARNING] = "Invalid parameters"
return
up_files = self._request.get(API_UPLOAD)
if not up_files:
self._response[R_WARNING] = "No file to upload"
return
chunk = self._request.get(API_CHUNK)
if not chunk:
self._response[R_WARNING] = "No chunk to upload"
return
max_size = self._options["upload_max_size"]
upload_paths = self._request.get(API_UPLOAD_PATH)
if upload_paths:
upload_paths = [self._find_dir(d) for d in upload_paths]
if upload_paths and upload_paths[0]:
cur_dir = upload_paths[0]
if not cur_dir:
self._response[R_WARNING] = "Invalid upload path"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_WARNING] = "Access denied"
return
if chunk.endswith(".part"):
chunk_range = self._request.get(API_RANGE)
if not chunk_range:
self._response[R_WARNING] = "No chunk range"
return
start, clength, total = [int(i) for i in chunk_range.split(",")]
name = ".".join(chunk.split(".")[:-2])
if not self._is_upload_allow(name):
self._set_error_data(name, "Not allowed file type")
elif total > max_size:
self._set_error_data(name, "File exceeds the maximum allowed filesize")
else:
chunk_index, total_chunks = [
int(i) for i in chunk.split(".")[-2].split("_")
]
if not _check_name(name):
self._set_error_data(name, "Invalid name: " + name)
else:
record_path = os.path.join(cur_dir, "." + name + ".txt")
file_path = os.path.join(cur_dir, name + ".parts")
if not os.path.exists(file_path) and os.path.exists(record_path):
os.remove(record_path)
with open(
file_path, "rb+" if os.path.exists(file_path) else "wb+"
) as fil:
fil.seek(start)
data = up_files[0]
written_size = 0
for chunk in self._fbuffer(data.file):
fil.write(chunk)
written_size += len(chunk)
if written_size > clength:
self._set_error_data(name, "Invalid file size")
break
with open(
record_path,
"r+" if os.path.exists(record_path) else "w+",
encoding="utf-8",
) as record_fil:
record_fil.seek(chunk_index)
record_fil.write("X")
record_fil.seek(0)
written = record_fil.read()
if written == ("X" * (total_chunks + 1)):
self._response[R_ADDED] = []
self._response[R_CHUNKMERGED] = name
self._response[R_NAME] = name
else:
self._response[R_ADDED] = []
if R_CHUNKMERGED in self._response:
os.remove(record_path)
else:
name = chunk
file_path = os.path.join(cur_dir, name)
if os.path.exists(file_path + ".parts"):
up_size = os.lstat(file_path + ".parts").st_size
if up_size > max_size:
try:
os.unlink(file_path + ".parts")
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
self._response[R_WARNING].append(
"File was only partially uploaded"
)
else:
if self._is_upload_allow(name):
os.rename(file_path + ".parts", file_path)
os.chmod(file_path, self._options["file_mode"])
self._response[R_ADDED] = [self._info(file_path)]
else:
self._response[R_WARNING].append("Not allowed file type")
try:
os.unlink(file_path + ".parts")
except OSError:
pass
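    # Handles regular (non-chunked) uploads: streams every posted file into the target
    # directory while enforcing the size limit and the upload allow/deny MIME rules.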
def __upload_small_files(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_WARNING] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_WARNING] = "Invalid parameters"
return
up_files = self._request.get(API_UPLOAD)
if not up_files:
self._response[R_WARNING] = "No file to upload"
return
up_size = 0
max_size = self._options["upload_max_size"]
upload_paths = self._request.get(API_UPLOAD_PATH)
if upload_paths:
upload_paths = [self._find_dir(d) for d in upload_paths]
for idx, data in enumerate(up_files):
name = data.filename.encode("utf-8")
if not name:
continue
name = self._check_utf8(name)
name = os.path.basename(name)
if not upload_paths:
target_dir = cur_dir
else:
target_dir = upload_paths[idx]
if not target_dir:
self._response[R_WARNING].append("Invalid upload path")
elif not _check_name(name):
self._response[R_WARNING].append("Invalid name: " + name)
elif not self._is_allowed(target_dir, "write"):
self._response[R_WARNING] = "Access denied"
else:
name = os.path.join(target_dir, name)
replace = os.path.exists(name)
try:
with open(name, "wb", self._options["upload_write_chunk"]) as fil:
for chunk in self._fbuffer(data.file):
fil.write(chunk)
up_size += os.lstat(name).st_size
if up_size > max_size:
try:
os.unlink(name)
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
self._response[R_WARNING].append(
"File was only partially uploaded"
)
elif not self._is_upload_allow(name):
self._response[R_WARNING].append("Not allowed file type")
try:
os.unlink(name)
except OSError:
pass
else:
os.chmod(name, self._options["file_mode"])
                        if replace:
                            self._rm_tmb(name)
self._response[R_ADDED].append(self._info(name))
except OSError:
self._response[R_WARNING].append("Unable to save uploaded file")
if up_size > max_size:
try:
os.unlink(name)
self._response[R_WARNING].append(
"File exceeds the maximum allowed filesize"
)
except OSError:
self._response[R_WARNING].append(
"File was only partially uploaded"
)
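    # Handles the "paste" command: moves (cut) or copies the selected files into the
    # destination directory, refusing to overwrite existing names or to copy a
    # directory into itself.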
def __paste(self) -> None:
if API_TARGETS in self._request and API_DST in self._request:
dst = self._find_dir(self._request[API_DST])
cur_dir = dst
if not cur_dir or not dst or API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
files = self._request[API_TARGETS]
if not isinstance(files, list):
files = [files]
cut = False
if API_CUT in self._request:
if self._request[API_CUT] == "1":
cut = True
if not self._is_allowed(dst, "write"):
self._response[R_ERROR] = "Access denied"
return
added = []
removed = []
for fhash in files:
fil = self._find(fhash)
if not fil:
self._response[R_ERROR] = "File not found"
return
new_dst = os.path.join(dst, os.path.basename(fil))
if dst.find(fil) == 0:
self._response[R_ERROR] = "Unable to copy into itself"
return
if cut:
if not self._is_allowed(fil, "rm"):
self._response[R_ERROR] = "Move failed"
self._set_error_data(fil, "Access denied")
return
if os.path.exists(new_dst):
self._response[
R_ERROR
] = "File or folder with the same name already exists"
self._set_error_data(
fil, "File or folder with the same name already exists"
)
return
try:
os.rename(fil, new_dst)
self._rm_tmb(fil)
added.append(self._info(new_dst))
removed.append(fhash)
continue
except OSError:
self._response[R_ERROR] = "Unable to move files"
self._set_error_data(fil, "Unable to move")
return
else:
if not self._copy(fil, new_dst):
self._response[R_ERROR] = "Unable to copy files"
return
added.append(self._info(new_dst))
continue
self._response[R_ADDED] = added
self._response[R_REMOVED] = removed
else:
self._response[R_ERROR] = "Invalid parameters"
def __duplicate(self) -> None:
targets = self._request.get(API_TARGETS)
if not targets:
self._response[R_ERROR] = "Invalid parameters"
return
added = []
for target in targets:
target = self._find(target)
if not target:
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(target)
if not self._is_allowed(target, "read") or not self._is_allowed(
cur_dir, "write"
):
self._response[R_ERROR] = "Access denied"
return
new_name = _unique_name(target)
if not self._copy(target, new_name):
self._response[R_ERROR] = "Unable to create file copy"
return
added.append(self._info(new_name))
self._response[R_ADDED] = added
def __resize(self) -> None:
target = self._request.get(API_TARGET)
width = self._request.get(API_WIDTH)
height = self._request.get(API_HEIGHT)
if not (target and width is not None and height is not None):
self._response[R_ERROR] = "Invalid parameters"
return
width = int(width)
height = int(height)
if width < 1 or height < 1:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "write"):
self._response[R_ERROR] = "Access denied"
return
if _mimetype(cur_file).find("image") != 0:
self._response[R_ERROR] = "File is not an image"
return
self._debug("resize " + cur_file, str(width) + ":" + str(height))
if not self._init_img_lib():
return
try:
            img = self._img.open(cur_file)
            img_resized = img.resize((width, height), self._img.ANTIALIAS)
            img_resized.save(cur_file)
            self._rm_tmb(cur_file)
        except OSError as exc:
            self._debug("resizeFailed_" + self._options["root"], str(exc))
            self._response[R_ERROR] = "Unable to resize image"
            return
self._response[R_CHANGED] = [self._info(cur_file)]
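    # Creates a batch of missing thumbnails (at most "tmb_at_once", defaulting to 5)
    # for the requested targets and returns a hash -> thumbnail URL mapping.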
def __thumbnails(self) -> None:
thumbs_dir = self._options["tmb_dir"]
targets = self._request.get(API_TARGETS)
if not targets:
return
if not self._init_img_lib() or not self._can_create_tmb():
return
        assert thumbs_dir  # typing
        if self._options["tmb_at_once"] > 0:
tmb_max = self._options["tmb_at_once"]
else:
tmb_max = 5
self._response[R_IMAGES] = {}
i = 0
for fhash in targets:
path = self._find(fhash)
if path is None:
continue
if os.path.dirname(path) == thumbs_dir:
continue
if self._can_create_tmb(path) and self._is_allowed(path, "read"):
tmb = os.path.join(thumbs_dir, fhash + ".png")
if not os.path.exists(tmb):
if self._tmb(path, tmb):
self._response[R_IMAGES].update({fhash: self._path2url(tmb)})
i += 1
if i >= tmb_max:
break
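    # Handles the "size" command: reports per-target and aggregated totals for size,
    # file count and directory count.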
def __size(self) -> None:
if API_TARGETS not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
targets = self._request[API_TARGETS]
all_total_size = 0
all_file_count = 0
all_dir_count = 0
sizes = []
for target in targets:
path = self._find(target)
if path is None:
self._set_error_data(target, "Target not found")
continue
total_size = 0
file_count = 0
dir_count = 0
if os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
size = self._dir_size(folder_path)
sizes.append({})
dir_count += 1
total_size += size
for fil in files:
file_path = os.path.join(root, fil)
size = os.stat(file_path).st_size
total_size += size
file_count += 1
break
else:
                size = os.stat(path).st_size
total_size += size
file_count += 1
sizes.append(
{R_DIR_CNT: dir_count, R_FILE_CNT: file_count, R_SIZE: total_size}
)
all_total_size += total_size
all_file_count += file_count
all_dir_count += dir_count
self._response[R_SIZE] = all_total_size
self._response[R_FILE_CNT] = all_file_count
self._response[R_DIR_CNT] = all_dir_count
self._response[R_SIZES] = sizes
def __ls(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
intersect = self._request.get(API_INTERSECT)
path = self._find(target)
if path is None or not os.path.isdir(path):
self._response[R_ERROR] = "Target directory not found"
return
if os.path.islink(path):
path = self._read_link(path)
if path is None:
self._response[R_ERROR] = "Directory (link) not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
file_names = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
items = {}
for fname in file_names:
fhash = self._hash(os.path.join(path, fname))
if intersect:
if fhash in intersect:
items[fhash] = fname
else:
items[fhash] = fname
self._response[R_LIST] = items
def __tree(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
path = self._find_dir(target)
if path is None or not os.path.isdir(path):
self._response[R_ERROR] = "Directory not found"
return
if os.path.islink(path):
path = self._read_link(path)
if path is None:
self._response[R_ERROR] = "Directory (link) not found"
return
if not self._is_allowed(path, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
directories = os.listdir(path)
except PermissionError:
self._response[R_ERROR] = "Access denied"
return
tree = []
for directory in sorted(directories):
dir_path = os.path.join(path, directory)
if (
os.path.isdir(dir_path)
and not os.path.islink(dir_path)
and self._is_accepted(directory)
):
tree.append(self._info(dir_path))
self._response[R_TREE] = tree
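    # Handles the "get" command: returns the file content as UTF-8 text, falling back
    # to a base64-encoded payload for binary files.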
def __get(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._response[R_ERROR] = "Access denied"
return
try:
with open(cur_file, "r", encoding="utf-8") as text_fil:
self._response[API_CONTENT] = text_fil.read()
except UnicodeDecodeError:
with open(cur_file, "rb") as bin_fil:
self._response[API_CONTENT] = base64.b64encode(bin_fil.read()).decode(
"ascii"
)
def __dim(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "read"):
self._response[R_ERROR] = "Access denied"
return
dim = self._get_img_size(cur_file)
if dim:
self._response[R_DIM] = str(dim)
else:
self._response[R_DIM] = None
def __put(self) -> None:
target = self._request.get(API_TARGET)
content = self._request.get(API_CONTENT)
if not target or not content:
self._response[R_ERROR] = "Invalid parameters"
return
cur_file = self._find(target)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_file, "write"):
self._response[R_ERROR] = "Access denied"
return
try:
if (
self._request[API_CONTENT].startswith("data:")
and ";base64," in self._request[API_CONTENT][:100]
):
img_data = self._request[API_CONTENT].split(";base64,")[1]
img_data = base64.b64decode(img_data)
with open(cur_file, "wb") as bin_fil:
bin_fil.write(img_data)
else:
with open(cur_file, "w+", encoding="utf-8") as text_fil:
text_fil.write(self._request[API_CONTENT])
self._rm_tmb(cur_file)
self._response[R_CHANGED] = [self._info(cur_file)]
except OSError:
self._response[R_ERROR] = "Unable to write to file"
def __archive(self) -> None:
# "name" is a parameter according to api 2.1.
archive_type = self._request.get(API_TYPE)
target = self._request.get(API_TARGET)
files = self._request.get(API_TARGETS)
if not archive_type or not target or not files:
self._response[R_ERROR] = "Invalid parameters"
return
cur_dir = self._find_dir(target)
if not cur_dir:
self._response[R_ERROR] = "File not found"
return
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
if (
archive_type not in self._options["archivers"]["create"]
or archive_type not in self._options["archive_mimes"]
):
self._response[R_ERROR] = "Unable to create archive"
return
real_files = []
for fhash in files:
cur_file = self._find(fhash, cur_dir)
if not cur_file:
self._response[R_ERROR] = "File not found"
return
real_files.append(os.path.basename(cur_file))
arc = self._options["archivers"]["create"][archive_type]
if len(real_files) > 1:
archive_name = "Archive"
else:
archive_name = real_files[0]
archive_name += "." + arc[ARCHIVE_EXT]
archive_name = _unique_name(archive_name, "")
archive_path = os.path.join(cur_dir, archive_name)
cmd = [arc[ARCHIVE_CMD]]
for arg in arc[ARCHIVE_ARGC].split():
cmd.append(arg)
cmd.append(archive_name)
for fil in real_files:
cmd.append(fil)
cur_cwd = os.getcwd()
os.chdir(cur_dir)
ret = _run_sub_process(cmd)
os.chdir(cur_cwd)
if not ret:
self._response[R_ERROR] = "Unable to create archive"
return
self._response[R_ADDED] = [self._info(archive_path)]
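    # Handles the "extract" command: unpacks an archive next to itself, optionally into
    # a freshly created folder when "makedir" is requested, and reports what was added.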
def __extract(self) -> None:
target = self._request.get(API_TARGET)
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
makedir = self._request.get(API_MAKEDIR)
cur_file = self._find(target)
if cur_file is None or os.path.isdir(cur_file):
self._response[R_ERROR] = "File not found"
return
cur_dir = os.path.dirname(cur_file)
if not self._is_allowed(cur_dir, "write"):
self._response[R_ERROR] = "Access denied"
return
mime = _mimetype(cur_file)
self._check_archivers()
if mime not in self._options["archivers"]["extract"]:
self._response[R_ERROR] = "Unable to extract files from archive"
return
arc = self._options["archivers"]["extract"][mime]
cmd = [arc[ARCHIVE_CMD]]
for arg in arc[ARCHIVE_ARGC].split():
cmd.append(arg)
cmd.append(os.path.basename(cur_file))
target_dir = cur_dir
added = None
if makedir and makedir != "0":
base_name = os.path.splitext(os.path.basename(cur_file))[0] or "New Folder"
target_dir = os.path.join(target_dir, base_name)
target_dir = _unique_name(target_dir, copy="")
try:
os.mkdir(target_dir, int(self._options["dir_mode"]))
except OSError:
self._response[R_ERROR] = "Unable to create folder: " + base_name
return
cmd += shlex.split(arc["argd"].format(shlex.quote(target_dir)))
added = [self._info(target_dir)]
if added is None:
try:
existing_files = os.listdir(cur_dir)
except PermissionError:
# FIXME: This will likely never happen.
# The find helper will already have failed
# to find the file without parent dir read access.
self._response[R_ERROR] = "Access denied"
return
cur_cwd = os.getcwd()
os.chdir(cur_dir)
ret = _run_sub_process(cmd)
os.chdir(cur_cwd)
if not ret:
self._response[R_ERROR] = "Unable to extract files from archive"
return
if added is None:
added = [
self._info(os.path.join(cur_dir, dname))
for dname in os.listdir(cur_dir)
if dname not in existing_files
]
self._response[R_ADDED] = added
def __ping(self) -> None:
self._http_status_code = 200
self._http_header["Connection"] = "close"
def __search(self) -> None:
if API_Q not in self._request:
self._response[R_ERROR] = "Invalid parameters"
return
if API_TARGET in self._request:
target = self._request[API_TARGET]
if not target:
self._response[R_ERROR] = "Invalid parameters"
return
search_path = self._find_dir(target)
else:
search_path = self._options["root"]
if not search_path:
self._response[R_ERROR] = "File not found"
return
mimes = self._request.get(API_MIMES)
result = []
query = self._request[API_Q]
for root, dirs, files in os.walk(search_path):
for fil in files:
if query.lower() in fil.lower():
file_path = os.path.join(root, fil)
if mimes is None:
result.append(self._info(file_path))
else:
if _mimetype(file_path) in mimes:
result.append(self._info(file_path))
if mimes is None:
for folder in dirs:
file_path = os.path.join(root, folder)
if query.lower() in folder.lower():
result.append(self._info(file_path))
self._response[R_FILES] = result
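    # Builds the "cwd" entry of the response describing the currently open directory.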
def _cwd(self, path: str) -> None:
name = os.path.basename(path)
if path == self._options["root"]:
name = self._options["root_alias"]
root = True
else:
root = False
if self._options["root_alias"]:
basename = self._options["root_alias"]
else:
basename = os.path.basename(self._options["root"])
rel = os.path.join(basename, path[len(self._options["root"]) :])
info = {
"hash": self._hash(path),
"name": self._check_utf8(name),
"mime": "directory",
"rel": self._check_utf8(rel),
"size": 0,
"date": datetime.fromtimestamp(os.stat(path).st_mtime).strftime(
"%d %b %Y %H:%M"
),
"read": 1,
"write": 1 if self._is_allowed(path, "write") else 0,
"locked": 0,
"rm": not root and self._is_allowed(path, "rm"),
"volumeid": self.volumeid,
}
try:
info["dirs"] = 1 if any(next(os.walk(path))[1]) else 0
except StopIteration:
info["dirs"] = 0
self._response[R_CWD] = info
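    # Builds the elFinder info dict for a single path: hash, MIME type, permissions,
    # size, and (for images) thumbnail/dimension data; symlinks are resolved and
    # reported with their link target.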
def _info(self, path: str) -> Info:
# mime = ''
filetype = "file"
if os.path.isfile(path):
filetype = "file"
elif os.path.isdir(path):
filetype = "dir"
elif os.path.islink(path):
filetype = "link"
stat = os.lstat(path)
readable = self._is_allowed(path, "read")
writable = self._is_allowed(path, "write")
deletable = self._is_allowed(path, "rm")
info = {
"name": self._check_utf8(os.path.basename(path)),
"hash": self._hash(path),
"mime": "directory" if filetype == "dir" else _mimetype(path),
"read": 1 if readable else 0,
"write": 1 if writable else 0,
"locked": 1 if not readable and not writable and not deletable else 0,
"ts": stat.st_mtime,
} # type: Info
if self._options["expose_real_path"]:
info["path"] = os.path.abspath(path)
if filetype == "dir":
info["volumeid"] = self.volumeid
try:
info["dirs"] = 1 if any(next(os.walk(path))[1]) else 0
except StopIteration:
info["dirs"] = 0
if path != self._options["root"]:
info["phash"] = self._hash(os.path.dirname(path))
if filetype == "link":
lpath = self._read_link(path)
if not lpath:
info["mime"] = "symlink-broken"
return info
if os.path.isdir(lpath):
info["mime"] = "directory"
else:
info["mime"] = _mimetype(lpath)
if self._options["root_alias"]:
basename = self._options["root_alias"]
else:
basename = os.path.basename(self._options["root"])
info["link"] = self._hash(lpath)
info["alias"] = os.path.join(basename, lpath[len(self._options["root"]) :])
info["read"] = 1 if info["read"] and self._is_allowed(lpath, "read") else 0
info["write"] = (
1 if info["write"] and self._is_allowed(lpath, "write") else 0
)
info["locked"] = (
1
if (
not info["write"]
and not info["read"]
and not self._is_allowed(lpath, "rm")
)
else 0
)
info["size"] = 0
else:
lpath = None
info["size"] = self._dir_size(path) if filetype == "dir" else stat.st_size
if info["mime"] != "directory":
if self._options["file_url"] and info["read"]:
if lpath:
info["url"] = self._path2url(lpath)
else:
info["url"] = self._path2url(path)
if info["mime"][0:5] == "image":
thumbs_dir = self._options["tmb_dir"]
if self._can_create_tmb():
assert thumbs_dir # typing
dim = self._get_img_size(path)
if dim:
info["dim"] = dim
# if we are in tmb dir, files are thumbs itself
if os.path.dirname(path) == thumbs_dir:
info["tmb"] = self._path2url(path)
return info
tmb = os.path.join(thumbs_dir, info["hash"] + ".png")
if os.path.exists(tmb):
tmb_url = self._path2url(tmb)
info["tmb"] = tmb_url
else:
if info["mime"].startswith("image/"):
info["tmb"] = "1"
if info["mime"] == "application/x-empty" or info["mime"] == "inode/x-empty":
info["mime"] = "text/plain"
return info
def _remove(self, target: str) -> bool:
if not self._is_allowed(target, "rm"):
self._set_error_data(target, "Access denied")
if not os.path.isdir(target):
try:
os.unlink(target)
self._rm_tmb(target)
return True
except OSError:
self._set_error_data(target, "Remove failed")
return False
else:
try:
targets = os.listdir(target)
except PermissionError:
self._set_error_data(target, "Access denied")
return False
for fil in targets:
if self._is_accepted(fil):
self._remove(os.path.join(target, fil))
try:
os.rmdir(target)
return True
except OSError:
self._set_error_data(target, "Remove failed")
return False
def _copy(self, src: str, dst: str) -> bool:
dst_dir = os.path.dirname(dst)
if not (self._is_allowed(src, "read") and self._is_allowed(dst_dir, "write")):
self._set_error_data(src, "Access denied")
return False
if os.path.exists(dst):
self._set_error_data(
dst, "File or folder with the same name already exists"
)
return False
if not os.path.isdir(src):
try:
shutil.copyfile(src, dst)
shutil.copymode(src, dst)
return True
except (shutil.SameFileError, OSError):
self._set_error_data(src, "Unable to copy files")
return False
else:
try:
os.mkdir(dst, int(self._options["dir_mode"]))
shutil.copymode(src, dst)
except (shutil.SameFileError, OSError):
self._set_error_data(src, "Unable to copy files")
return False
try:
srcs = os.listdir(src)
except PermissionError:
self._set_error_data(src, "Access denied")
return False
for i in srcs:
new_src = os.path.join(src, i)
new_dst = os.path.join(dst, i)
if not self._copy(new_src, new_dst):
self._set_error_data(new_src, "Unable to copy files")
return False
return True
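    # Resolves a directory hash back to a path, preferring the hash cache and otherwise
    # walking the tree below "path" (the volume root by default).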
def _find_dir(self, fhash: str, path: Optional[str] = None) -> Optional[str]:
fhash = str(fhash)
# try to get find it in the cache
cached_path = self._cached_path.get(fhash)
if cached_path:
return cached_path
if not path:
path = self._options["root"]
if fhash == self._hash(path):
return path
if not os.path.isdir(path):
return None
for root, dirs, _ in os.walk(path, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
if not os.path.islink(folder_path) and fhash == self._hash(folder_path):
return folder_path
return None
def _find(self, fhash: str, parent: Optional[str] = None) -> Optional[str]:
fhash = str(fhash)
cached_path = self._cached_path.get(fhash)
if cached_path:
return cached_path
if not parent:
parent = self._options["root"]
if os.path.isdir(parent):
for root, dirs, files in os.walk(parent, topdown=True):
for folder in dirs:
folder_path = os.path.join(root, folder)
if fhash == self._hash(folder_path):
return folder_path
for fil in files:
file_path = os.path.join(root, fil)
if fhash == self._hash(file_path):
return file_path
return None
def _tmb(self, path: str, tmb_path: str) -> bool:
try:
img = self._img.open(path).copy() # type: ignore
size = self._options["tmb_size"], self._options["tmb_size"]
box = _crop_tuple(img.size)
if box:
img = img.crop(box)
img.thumbnail(size, self._img.ANTIALIAS) # type: ignore
img.save(tmb_path, "PNG")
# UnidentifiedImageError requires Pillow 7.0.0
except (OSError, ValueError) as exc:
self._debug("tmbFailed_" + path, str(exc))
return False
return True
def _rm_tmb(self, path: str) -> None:
tmb = self._tmb_path(path)
if tmb:
if os.path.exists(tmb):
try:
os.unlink(tmb)
except OSError:
pass
def _read_link(self, path: str) -> Optional[str]:
target = os.readlink(path)
if not target[0] == "/":
target = os.path.join(os.path.dirname(path), target)
target = os.path.normpath(target)
if os.path.exists(target):
if not target.find(self._options["root"]) == -1:
return target
return None
def _dir_size(self, path: str) -> int:
total_size = 0
if self._options["dir_size"]:
for dirpath, _, filenames in os.walk(path):
for fil in filenames:
file_path = os.path.join(dirpath, fil)
if os.path.exists(file_path):
total_size += os.stat(file_path).st_size
else:
total_size = os.lstat(path).st_size
return total_size
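    # Yields an uploaded file in fixed-size chunks so large uploads are never held in
    # memory at once.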
def _fbuffer(
self, fil: BinaryIO, chunk_size: int = _options["upload_write_chunk"]
) -> Generator[bytes, None, None]:
while True:
chunk = fil.read(chunk_size)
if not chunk:
break
yield chunk
def _can_create_tmb(self, path: Optional[str] = None) -> bool:
if self._options["img_lib"] and self._options["tmb_dir"]:
if path is not None:
mime = _mimetype(path)
if mime[0:5] != "image":
return False
return True
return False
def _tmb_path(self, path: str) -> Optional[str]:
tmb = None
thumbs_dir = self._options["tmb_dir"]
if thumbs_dir:
if not os.path.dirname(path) == thumbs_dir:
tmb = os.path.join(thumbs_dir, self._hash(path) + ".png")
return tmb
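    # Applies the upload_allow/upload_deny MIME prefixes in the order given by
    # "upload_order" to decide whether a file may be uploaded.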
def _is_upload_allow(self, name: str) -> bool:
allow = False
deny = False
mime = _mimetype(name)
if "all" in self._options["upload_allow"]:
allow = True
else:
for opt in self._options["upload_allow"]:
if mime.find(opt) == 0:
allow = True
if "all" in self._options["upload_deny"]:
deny = True
else:
for opt in self._options["upload_deny"]:
if mime.find(opt) == 0:
deny = True
if self._options["upload_order"][0] == "allow": # ,deny
if deny is True:
return False
return bool(allow)
# deny,allow
if allow is True:
return True
if deny is True:
return False
return True
def _is_accepted(self, target: str) -> bool:
if target in (".", ".."):
return False
if target[0:1] == "." and not self._options["dot_files"]:
return False
return True
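    # Checks OS-level access for "read"/"write"/"rm" and then the per-path "perms"
    # regex rules, falling back to the configured defaults.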
def _is_allowed(self, path: str, access: str) -> bool:
if not os.path.exists(path):
return False
if access == "read":
if not os.access(path, os.R_OK):
self._set_error_data(path, access)
return False
elif access == "write":
if not os.access(path, os.W_OK):
self._set_error_data(path, access)
return False
elif access == "rm":
if not os.access(os.path.dirname(path), os.W_OK):
self._set_error_data(path, access)
return False
else:
return False
path = path[len(os.path.normpath(self._options["root"])) :]
for ppath, permissions in self._options["perms"].items():
regex = r"" + ppath
if re.search(regex, path) and access in permissions:
return permissions[access]
return self._options["defaults"][access]
def _hash(self, path: str) -> str:
hash_code = make_hash(path)
# TODO: what if the cache getting to big? # pylint: disable=fixme
self._cached_path[hash_code] = path
return hash_code
def _path2url(self, path: str) -> str:
cur_dir = path
length = len(self._options["root"])
url = multi_urljoin(
self._options["base_url"],
self._options["files_url"],
cur_dir[length:],
)
url = self._check_utf8(url).replace(os.sep, "/")
url = quote(url, safe="/")
return url
def _set_error_data(self, path: str, msg: str) -> None:
self._error_data[path] = msg
def _init_img_lib(self) -> Optional[str]:
if not self._options["img_lib"] or self._options["img_lib"] == "auto":
self._options["img_lib"] = "PIL"
if self._options["img_lib"] == "PIL":
try:
from PIL import Image # pylint: disable=import-outside-toplevel
self._img = Image
except ImportError:
self._img = None
self._options["img_lib"] = None
else:
raise NotImplementedError
self._debug("img_lib", self._options["img_lib"])
return self._options["img_lib"]
def _get_img_size(self, path: str) -> Optional[str]:
if not self._init_img_lib():
return None
if self._can_create_tmb():
try:
img = self._img.open(path) # type: ignore
return str(img.size[0]) + "x" + str(img.size[1])
except OSError: # UnidentifiedImageError requires Pillow 7.0.0
print("WARNING: unidentified image or file not found: " + path)
return None
def _debug(self, key: str, val: Any) -> None:
if self._options["debug"]:
self._response[R_DEBUG].update({key: val})
def _check_archivers(self) -> None:
# import subprocess
# proc = subprocess.Popen(['tar', '--version'], shell = False,
# stdout = subprocess.PIPE, stderr=subprocess.PIPE)
# out, err = proc.communicate()
# print 'out:', out, '\nerr:', err, '\n'
archive = {"create": {}, "extract": {}} # type: Archivers
if (
"archive" in self._options["disabled"]
and "extract" in self._options["disabled"]
):
self._options["archive_mimes"] = []
self._options["archivers"] = archive
return
tar = _run_sub_process(["tar", "--version"])
gzip = _run_sub_process(["gzip", "--version"])
bzip2 = _run_sub_process(["bzip2", "--version"])
zipc = _run_sub_process(["zip", "--version"])
unzip = _run_sub_process(["unzip", "--help"])
rar = _run_sub_process(["rar", "--version"], valid_return=[0, 7])
unrar = _run_sub_process(["unrar"], valid_return=[0, 7])
p7z = _run_sub_process(["7z", "--help"])
p7za = _run_sub_process(["7za", "--help"])
p7zr = _run_sub_process(["7zr", "--help"])
# tar = False
# tar = gzip = bzip2 = zipc = unzip = rar = unrar = False
# print tar, gzip, bzip2, zipc, unzip, rar, unrar, p7z, p7za, p7zr
create = archive["create"]
extract = archive["extract"]
if tar:
mime = "application/x-tar"
create.update(
{mime: {ARCHIVE_CMD: "tar", ARCHIVE_ARGC: "-cf", ARCHIVE_EXT: "tar"}}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xf",
ARCHIVE_EXT: "tar",
"argd": "-C {}",
}
}
)
if tar and gzip:
mime = "application/x-gzip"
create.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-czf",
ARCHIVE_EXT: "tar.gz",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xzf",
ARCHIVE_EXT: "tar.gz",
"argd": "-C {}",
}
}
)
if tar and bzip2:
mime = "application/x-bzip2"
create.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-cjf",
ARCHIVE_EXT: "tar.bz2",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "tar",
ARCHIVE_ARGC: "-xjf",
ARCHIVE_EXT: "tar.bz2",
"argd": "-C {}",
}
}
)
mime = "application/zip"
if zipc:
create.update(
{mime: {ARCHIVE_CMD: "zip", ARCHIVE_ARGC: "-r9", ARCHIVE_EXT: "zip"}}
)
if unzip:
extract.update(
{
mime: {
ARCHIVE_CMD: "unzip",
ARCHIVE_ARGC: "",
ARCHIVE_EXT: "zip",
"argd": "-d {}",
}
}
)
mime = "application/x-rar"
if rar:
create.update(
{
mime: {
ARCHIVE_CMD: "rar",
ARCHIVE_ARGC: "a -inul",
ARCHIVE_EXT: "rar",
}
}
)
extract.update(
{
mime: {
ARCHIVE_CMD: "rar",
ARCHIVE_ARGC: "x -y",
ARCHIVE_EXT: "rar",
"argd": "{}",
}
}
)
elif unrar:
extract.update(
{
mime: {
ARCHIVE_CMD: "unrar",
ARCHIVE_ARGC: "x -y",
ARCHIVE_EXT: "rar",
"argd": "{}",
}
}
)
p7zip = None
if p7z:
p7zip = "7z"
elif p7za:
p7zip = "7za"
elif p7zr:
p7zip = "7zr"
if p7zip:
mime = "application/x-7z-compressed"
create.update(
{mime: {ARCHIVE_CMD: p7zip, ARCHIVE_ARGC: "a -t7z", ARCHIVE_EXT: "7z"}}
)
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "7z",
"argd": "-o{}",
}
}
)
mime = "application/x-tar"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -ttar",
ARCHIVE_EXT: "tar",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar",
"argd": "-o{}",
}
}
)
mime = "application/x-gzip"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tgzip",
ARCHIVE_EXT: "gz",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar.gz",
"argd": "-o{}",
}
}
)
mime = "application/x-bzip2"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tbzip2",
ARCHIVE_EXT: "bz2",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "tar.bz2",
"argd": "-o{}",
}
}
)
mime = "application/zip"
if mime not in create:
create.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "a -tzip",
ARCHIVE_EXT: "zip",
}
}
)
if mime not in extract:
extract.update(
{
mime: {
ARCHIVE_CMD: p7zip,
ARCHIVE_ARGC: "extract -y",
ARCHIVE_EXT: "zip",
"argd": "-o{}",
}
}
)
if not self._options["archive_mimes"]:
self._options["archive_mimes"] = list(create.keys())
else:
pass
self._options["archivers"] = archive
def _check_utf8(self, name: Union[str, bytes]) -> str:
if isinstance(name, str):
return name
try:
str_name = name.decode("utf-8")
except UnicodeDecodeError:
str_name = str(name, "utf-8", "replace")
self._debug("invalid encoding", str_name)
return str_name
def _check_name(filename: str) -> bool:
if sanitize_filename(filename) != filename:
return False
return True
def _check_dir(filepath: str) -> bool:
if sanitize_filepath(filepath) != filepath:
return False
return True
def _mimetype(path: str) -> str:
mime = mimetypes.guess_type(path)[0] or "unknown"
_, ext = os.path.splitext(path)
if mime == "unknown" and ext in mimetypes.types_map:
mime = mimetypes.types_map[ext]
if mime == "text/plain" and ext == ".pl":
mime = MIME_TYPES[ext]
if mime == "application/vnd.ms-office" and ext == ".doc":
mime = MIME_TYPES[ext]
if mime == "unknown":
if os.path.basename(path) in ["README", "ChangeLog", "LICENSE", "Makefile"]:
mime = "text/plain"
else:
mime = MIME_TYPES.get(ext, mime)
return mime
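# Produces a non-clashing "<name> copy", "<name> copy 2", ... variant of the given path,
# keeping double extensions such as ".tar.gz" or ".tar.bz2" intact.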
def _unique_name(path: str, copy: str = " copy") -> str:
cur_dir = os.path.dirname(path)
cur_name = os.path.basename(path)
last_dot = cur_name.rfind(".")
ext = new_name = ""
if not os.path.isdir(path) and re.search(r"\..{3}\.(gz|bz|bz2)$", cur_name):
pos = -7
if cur_name[-1:] == "2":
pos -= 1
ext = cur_name[pos:]
old_name = cur_name[0:pos]
new_name = old_name + copy
elif os.path.isdir(path) or last_dot <= 0:
old_name = cur_name
new_name = old_name + copy
else:
ext = cur_name[last_dot:]
old_name = cur_name[0:last_dot]
new_name = old_name + copy
pos = 0
if old_name[-len(copy) :] == copy:
new_name = old_name
elif re.search(r"" + copy + r"\s\d+$", old_name):
pos = old_name.rfind(copy) + len(copy)
new_name = old_name[0:pos]
else:
new_path = os.path.join(cur_dir, new_name + ext)
if not os.path.exists(new_path):
return new_path
# if we are here then copy already exists or making copy of copy
# we will make new indexed copy *black magic*
idx = 1
if pos > 0:
idx = int(old_name[pos:])
while True:
idx += 1
new_name_ext = new_name + " " + str(idx) + ext
new_path = os.path.join(cur_dir, new_name_ext)
if not os.path.exists(new_path):
return new_path
# if idx >= 1000: break # possible loop
def _run_sub_process(cmd: List[str], valid_return: Optional[List[int]] = None) -> bool:
if valid_return is None:
valid_return = [0]
try:
completed = subprocess.run(
cmd, input=b"", check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except (subprocess.SubprocessError, OSError):
return False
if completed.returncode not in valid_return:
print(str(completed.stderr))
return False
return True
def _crop_tuple(size: Tuple[int, int]) -> Optional[Tuple[int, int, int, int]]:
width, height = size
if width > height: # landscape
left = int((width - height) / 2)
upper = 0
right = left + height
lower = height
return (left, upper, right, lower)
if height > width: # portrait
left = 0
upper = int((height - width) / 2)
right = width
lower = upper + width
return (left, upper, right, lower)
# cube
return None
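# Returns the MD5 hex digest of a string; _hash uses it to derive the elFinder file
# identifiers from filesystem paths.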
def make_hash(to_hash: str) -> str:
hash_obj = hashlib.md5()
hash_obj.update(to_hash.encode("utf-8"))
hash_code = str(hash_obj.hexdigest())
return hash_code
def multi_urljoin(*parts: str) -> str:
if parts[0].startswith("http"):
return str(
urljoin(
parts[0],
"/".join(part.strip("/") for part in parts[1:]),
)
)
return "/" + "/".join(part.strip("/") for part in parts if part)
| true | true |
1c4adaf952402e4e01c0172f18132c0bfdaf4075 | 4,308 | py | Python | tethys_portal/urls.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | [
"BSD-2-Clause"
] | 1 | 2020-10-08T20:38:33.000Z | 2020-10-08T20:38:33.000Z | tethys_portal/urls.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | [
"BSD-2-Clause"
] | 1 | 2018-04-14T19:40:54.000Z | 2018-04-14T19:40:54.000Z | tethys_portal/urls.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | [
"BSD-2-Clause"
] | 1 | 2021-09-07T14:47:11.000Z | 2021-09-07T14:47:11.000Z | """
********************************************************************************
* Name: urls.py
* Author: Nathan Swain
* Created On: 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import password_reset, password_reset_done, password_reset_confirm, \
password_reset_complete
from django.conf import settings
from tethys_apps.urls import extension_urls
from tethys_portal.views import accounts as tethys_portal_accounts, developer as tethys_portal_developer, \
error as tethys_portal_error, home as tethys_portal_home, user as tethys_portal_user
from tethys_apps import views as tethys_apps_views
# ensure at least staff users logged in before accessing admin login page
from django.contrib.admin.views.decorators import staff_member_required
admin.site.login = staff_member_required(admin.site.login, redirect_field_name="", login_url='/accounts/login/')
admin.autodiscover()
admin.site.login = staff_member_required(admin.site.login, redirect_field_name="", login_url='/accounts/login/')
account_urls = [
url(r'^login/$', tethys_portal_accounts.login_view, name='login'),
url(r'^logout/$', tethys_portal_accounts.logout_view, name='logout'),
url(r'^register/$', tethys_portal_accounts.register, name='register'),
url(r'^password/reset/$', password_reset, {'post_reset_redirect': '/accounts/password/reset/done/'},
name='password_reset'),
url(r'^password/reset/done/$', password_reset_done),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,
{'post_reset_redirect': '/accounts/password/done/'}, name='password_confirm'),
url(r'^password/done/$', password_reset_complete),
]
user_urls = [
url(r'^$', tethys_portal_user.profile, name='profile'),
url(r'^settings/$', tethys_portal_user.settings, name='settings'),
url(r'^change-password/$', tethys_portal_user.change_password, name='change_password'),
url(r'^disconnect/(?P<provider>[\w.@+-]+)/(?P<association_id>[0-9]+)/$', tethys_portal_user.social_disconnect,
name='disconnect'),
url(r'^delete-account/$', tethys_portal_user.delete_account, name='delete'),
]
developer_urls = [
url(r'^$', tethys_portal_developer.home, name='developer_home'),
url(r'^gizmos/', include('tethys_gizmos.urls', namespace='gizmos')),
url(r'^services/', include('tethys_services.urls', namespace='services')),
]
# development_error_urls = [
# url(r'^400/$', tethys_portal_error.handler_400, name='error_400'),
# url(r'^403/$', tethys_portal_error.handler_403, name='error_403'),
# url(r'^404/$', tethys_portal_error.handler_404, name='error_404'),
# url(r'^500/$', tethys_portal_error.handler_500, name='error_500'),
# ]
urlpatterns = [
url(r'^$', tethys_portal_home.home, name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include(account_urls, namespace='accounts')),
url(r'^captcha/', include('captcha.urls')),
url(r'^oauth2/', include('social_django.urls', namespace='social')),
url(r'^user/(?P<username>[\w.@+-]+)/', include(user_urls, namespace='user')),
url(r'^apps/', include('tethys_apps.urls')),
url(r'^extensions/', include(extension_urls)),
url(r'^developer/', include(developer_urls)),
url(r'^handoff/(?P<app_name>[\w-]+)/$', tethys_apps_views.handoff_capabilities, name='handoff_capabilities'),
url(r'^handoff/(?P<app_name>[\w-]+)/(?P<handler_name>[\w-]+)/$', tethys_apps_views.handoff, name='handoff'),
url(r'^update-job-status/(?P<job_id>[\w-]+)/$', tethys_apps_views.update_job_status, name='update_job_status'),
url(r'^terms/', include('termsandconditions.urls')),
url(r'session_security/', include('session_security.urls')),
# url(r'^error/', include(development_error_urls)),
]
if settings.DEBUG and 'silk' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^silk/', include('silk.urls', namespace='silk')))
handler400 = tethys_portal_error.handler_400
handler403 = tethys_portal_error.handler_403
handler404 = tethys_portal_error.handler_404
handler500 = tethys_portal_error.handler_500
| 48.954545 | 115 | 0.694754 | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import password_reset, password_reset_done, password_reset_confirm, \
password_reset_complete
from django.conf import settings
from tethys_apps.urls import extension_urls
from tethys_portal.views import accounts as tethys_portal_accounts, developer as tethys_portal_developer, \
error as tethys_portal_error, home as tethys_portal_home, user as tethys_portal_user
from tethys_apps import views as tethys_apps_views
from django.contrib.admin.views.decorators import staff_member_required
admin.site.login = staff_member_required(admin.site.login, redirect_field_name="", login_url='/accounts/login/')
admin.autodiscover()
admin.site.login = staff_member_required(admin.site.login, redirect_field_name="", login_url='/accounts/login/')
account_urls = [
url(r'^login/$', tethys_portal_accounts.login_view, name='login'),
url(r'^logout/$', tethys_portal_accounts.logout_view, name='logout'),
url(r'^register/$', tethys_portal_accounts.register, name='register'),
url(r'^password/reset/$', password_reset, {'post_reset_redirect': '/accounts/password/reset/done/'},
name='password_reset'),
url(r'^password/reset/done/$', password_reset_done),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,
{'post_reset_redirect': '/accounts/password/done/'}, name='password_confirm'),
url(r'^password/done/$', password_reset_complete),
]
user_urls = [
url(r'^$', tethys_portal_user.profile, name='profile'),
url(r'^settings/$', tethys_portal_user.settings, name='settings'),
url(r'^change-password/$', tethys_portal_user.change_password, name='change_password'),
url(r'^disconnect/(?P<provider>[\w.@+-]+)/(?P<association_id>[0-9]+)/$', tethys_portal_user.social_disconnect,
name='disconnect'),
url(r'^delete-account/$', tethys_portal_user.delete_account, name='delete'),
]
developer_urls = [
url(r'^$', tethys_portal_developer.home, name='developer_home'),
url(r'^gizmos/', include('tethys_gizmos.urls', namespace='gizmos')),
url(r'^services/', include('tethys_services.urls', namespace='services')),
]
urlpatterns = [
url(r'^$', tethys_portal_home.home, name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include(account_urls, namespace='accounts')),
url(r'^captcha/', include('captcha.urls')),
url(r'^oauth2/', include('social_django.urls', namespace='social')),
url(r'^user/(?P<username>[\w.@+-]+)/', include(user_urls, namespace='user')),
url(r'^apps/', include('tethys_apps.urls')),
url(r'^extensions/', include(extension_urls)),
url(r'^developer/', include(developer_urls)),
url(r'^handoff/(?P<app_name>[\w-]+)/$', tethys_apps_views.handoff_capabilities, name='handoff_capabilities'),
url(r'^handoff/(?P<app_name>[\w-]+)/(?P<handler_name>[\w-]+)/$', tethys_apps_views.handoff, name='handoff'),
url(r'^update-job-status/(?P<job_id>[\w-]+)/$', tethys_apps_views.update_job_status, name='update_job_status'),
url(r'^terms/', include('termsandconditions.urls')),
url(r'session_security/', include('session_security.urls')),
]
if settings.DEBUG and 'silk' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^silk/', include('silk.urls', namespace='silk')))
handler400 = tethys_portal_error.handler_400
handler403 = tethys_portal_error.handler_403
handler404 = tethys_portal_error.handler_404
handler500 = tethys_portal_error.handler_500
| true | true |
1c4adc3e1414a627a27ad7d8b9a75d4ab2509e2d | 4,973 | py | Python | plugins/rapid7_intsights/icon_rapid7_intsights/actions/get_alerts/schema.py | blaxminarayan-r7/insightconnect-plugins | a3963eb3d3d7432d07bd46a5641700bd0ba6e11e | [
"MIT"
] | null | null | null | plugins/rapid7_intsights/icon_rapid7_intsights/actions/get_alerts/schema.py | blaxminarayan-r7/insightconnect-plugins | a3963eb3d3d7432d07bd46a5641700bd0ba6e11e | [
"MIT"
] | null | null | null | plugins/rapid7_intsights/icon_rapid7_intsights/actions/get_alerts/schema.py | blaxminarayan-r7/insightconnect-plugins | a3963eb3d3d7432d07bd46a5641700bd0ba6e11e | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Search alerts based on criteria"
class Input:
ALERT_TYPE = "alert_type"
ASSIGNED = "assigned"
FOUND_DATE_FROM = "found_date_from"
FOUND_DATE_TO = "found_date_to"
HAS_INDICATORS = "has_indicators"
IS_CLOSED = "is_closed"
IS_FLAGGED = "is_flagged"
MATCHED_ASSET_VALUE = "matched_asset_value"
NETWORK_TYPE = "network_type"
REMEDIATION_STATUS = "remediation_status"
SEVERITY = "severity"
SOURCE_DATE_FROM = "source_date_from"
SOURCE_DATE_TO = "source_date_to"
SOURCE_TYPE = "source_type"
class Output:
ALERT_IDS = "alert_ids"
class GetAlertsInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"alert_type": {
"type": "array",
"title": "Alert Type",
"description": "List of alert types. Allowed values: AttackIndication, DataLeakage, Phishing, BrandSecurity, ExploitableData, vip",
"items": {
"type": "string"
},
"order": 1
},
"assigned": {
"type": "string",
"title": "Alert Assignment",
"description": "Show assigned/unassigned alerts",
"enum": [
"Assigned",
"Unassigned"
],
"order": 11
},
"found_date_from": {
"type": "string",
"title": "Found Date From",
"description": "Start date to fetch from in Unix Millisecond Timestamp",
"order": 9
},
"found_date_to": {
"type": "string",
"title": "Found Date To",
"description": "End date to fetch to in Unix Millisecond Timestamp",
"order": 10
},
"has_indicators": {
"type": "boolean",
"title": "Has Indicators",
"description": "Show alerts with IOCs results",
"order": 14
},
"is_closed": {
"type": "boolean",
"title": "Closed Status",
"description": "Status of the alert, either closed or open",
"enum": [
"Closed",
"Open"
],
"order": 13
},
"is_flagged": {
"type": "string",
"title": "Alert Flag Status",
"description": "Show flagged/unflagged alerts",
"enum": [
"Flagged",
"Unflagged"
],
"order": 12
},
"matched_asset_value": {
"type": "array",
"title": "Matched Asset Value",
"description": "List of matched asset values",
"items": {
"type": "string"
},
"order": 5
},
"network_type": {
"type": "array",
"title": "Network Type",
"description": "List of network type. Allowed values: ClearWeb, DarkWeb",
"items": {
"type": "string"
},
"order": 4
},
"remediation_status": {
"type": "array",
"title": "Remediation Status",
"description": "List of remediation statuses. Allowed values: InProgress, Pending, CancellationInProgress, Cancelled, CompletedSuccessfully, Failed",
"items": {
"type": "string"
},
"order": 6
},
"severity": {
"type": "array",
"title": "Severity",
"description": "List of alerts severity. Allowed values: High, Medium, Low",
"items": {
"type": "string"
},
"order": 2
},
"source_date_from": {
"type": "string",
"title": "Source Date From",
"description": "Start date to fetch from in Unix Millisecond Timestamp",
"order": 7
},
"source_date_to": {
"type": "string",
"title": "Source Date To",
"description": "End date to fetch to in Unix Millisecond Timestamp",
"order": 8
},
"source_type": {
"type": "array",
"title": "Source Type",
"description": "List of alerts source type. Allowed values: Application Store, Cyber Security Blog, Hacking News, Cyber Crime Forum, Hacktivism Forum, Social Media, Facebook, Twitter, LinkedIn, Google Plus, VK, Vimeo, YouTube, IRC Channel, IOC Block List, Credit Card Black Market, Paste Site, Data Leakage Website, Leaked Database, File Sharing Website, Gray Hat Website, Black Market, WHOIS servers, Company Website, Wikileaks, Pinterest, Tumblr, Instagram, Telegram, Webmail, Malware Analysis, Firehol, VRA, Other",
"items": {
"type": "string"
},
"order": 3
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetAlertsOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"alert_ids": {
"type": "array",
"title": "Alert IDs",
"description": "List of alert IDs",
"items": {
"type": "string"
},
"order": 1
}
},
"required": [
"alert_ids"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 27.174863 | 524 | 0.580736 |
| true | true |
1c4adc4be002dfe987da7b17651e87a0b87e7162 | 390 | py | Python | look/urls.py | scotthou94/myinstagram | 7313c89aa38e70c7c4f9b8969d699f850145afca | [
"MIT"
] | null | null | null | look/urls.py | scotthou94/myinstagram | 7313c89aa38e70c7c4f9b8969d699f850145afca | [
"MIT"
] | null | null | null | look/urls.py | scotthou94/myinstagram | 7313c89aa38e70c7c4f9b8969d699f850145afca | [
"MIT"
] | null | null | null | from django.conf.urls import url,include
from . import views
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^$', views.index),
url(r'^discover/', login_required(views.discover)),
url(r'^follow/$', login_required(views.follow), name='follow'),
url(r'^unfollow/(?P<target_id>\d+)/$', login_required(views.unfollow), name='unfollow')
]
| 32.5 | 90 | 0.694872 |
| true | true |
1c4adcf7e64b4d92e082b9ba0e6857c359ce8ed3 | 115 | py | Python | ex030.py | Roninho514/Treinamento-Python | fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6 | [
"MIT"
] | null | null | null | ex030.py | Roninho514/Treinamento-Python | fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6 | [
"MIT"
] | null | null | null | ex030.py | Roninho514/Treinamento-Python | fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6 | [
"MIT"
] | null | null | null | numero = int(input('Digite um número:'))
print('Esse número é par' if numero % 2 == 0 else 'Esse número é ímpar')
| 38.333333 | 73 | 0.678261 |
| true | true |
1c4adda868e850063084c445480ede3817802d12 | 93297 | py | Python | python/ray/tests/test_basic.py | gaocegege/ray | 03d05c8765bb6cfd30fdbbcd4577dc22c5dc5af7 | [
"Apache-2.0"
] | 1 | 2019-08-21T08:31:10.000Z | 2019-08-21T08:31:10.000Z | python/ray/tests/test_basic.py | GitAlanWong/ray | c852213b8349b6b9e9e7353573e2259a1b9ef925 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_basic.py | GitAlanWong/ray | c852213b8349b6b9e9e7353573e2259a1b9ef925 | [
"Apache-2.0"
] | 2 | 2019-09-04T13:27:51.000Z | 2019-09-17T04:20:38.000Z | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import json
import logging
from multiprocessing import Process
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.tests.cluster_utils
import ray.tests.utils
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_nested_functions(ray_start_regular):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a remote function that recursively calls itself.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that recursively call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
            f  # Deliberately reference f so this object closes over an ObjectID.
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(ray_start_regular):
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
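# A minimal sketch (not part of the original test suite) of why one might
# register a custom serializer: a class holding an unpicklable member such as
# a threading.Lock. Only the picklable state is serialized and the lock is
# rebuilt on deserialization. The names here are hypothetical, and ray.init()
# is assumed to have been called already.
def example_custom_serializer_for_lock():
    class Counter(object):
        def __init__(self, value=0):
            self.value = value
            self.lock = threading.Lock()

    def serialize_counter(counter):
        # Keep only the plain integer state.
        return counter.value

    def deserialize_counter(value):
        return Counter(value)

    ray.register_custom_serializer(
        Counter,
        serializer=serialize_counter,
        deserializer=deserialize_counter)
    restored = ray.get(ray.put(Counter(5)))
    assert restored.value == 5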
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start_regular):
    @ray.remote
    def no_op():
        pass

    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
assert ray.get(g._remote()) == []
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
@ray.remote
class Actor2(object):
def __init__(self):
pass
def method(self):
pass
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
a2 = Actor2._remote()
ray.get(a2.method._remote())
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
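# A minimal sketch (not part of the original test suite) showing that
# f._remote(...) is simply the explicit form of the options normally given to
# the @ray.remote decorator; both calls below request two return values. The
# function name is hypothetical and ray.init() is assumed to have been called.
def example_remote_vs_decorator_options():
    @ray.remote(num_return_vals=2)
    def pair(x):
        return x, x + 1

    a, b = pair.remote(1)
    c, d = pair._remote(args=[1], num_return_vals=2)
    assert ray.get([a, b, c, d]) == [1, 2, 1, 2]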
def test_many_fractional_resources(shutdown_only):
ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
@ray.remote
def g():
return 1
@ray.remote
def f(block, accepted_resources):
true_resources = {
resource: value[0][1]
for resource, value in ray.get_resource_ids().items()
}
if block:
ray.get(g.remote())
return true_resources == accepted_resources
# Check that the resource are assigned correctly.
result_ids = []
for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
resource_set = {"CPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
result_ids.append(
f._remote([False, resource_set], resources={"Custom": rand1}))
resource_set = {
"CPU": int(rand1 * 10000) / 10000,
"GPU": int(rand2 * 10000) / 10000,
"Custom": int(rand3 * 10000) / 10000
}
result_ids.append(
f._remote(
[False, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
result_ids.append(
f._remote(
[True, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
assert all(ray.get(result_ids))
# Check that the available resources at the end are the same as the
# beginning.
stop_time = time.time() + 10
correct_available_resources = False
while time.time() < stop_time:
if ray.available_resources() == {
"CPU": 2.0,
"GPU": 2.0,
"Custom": 2.0,
}:
correct_available_resources = True
break
if not correct_available_resources:
assert False, "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
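# A minimal sketch (not part of the original test suite) of the typical
# ray.wait pattern: block until the first result is ready and leave the rest
# running, without fetching any values. The names are hypothetical and
# ray.init() is assumed to have been called.
def example_wait_for_first_result():
    @ray.remote
    def sleep_then_return(delay):
        time.sleep(delay)
        return delay

    refs = [sleep_then_return.remote(d) for d in (0.1, 1.0, 1.0)]
    ready, not_ready = ray.wait(refs, num_returns=1)
    assert len(ready) == 1
    assert len(not_ready) == 2
    # In the common case the fastest task is the one that becomes ready first.
    return ray.get(ready[0])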
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
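# A minimal sketch (not part of the original test suite) of how the profiling
# hooks are commonly combined: wrap interesting work in ray.profile and write
# ray.timeline() out as Chrome-trace JSON that can be opened in
# chrome://tracing. The file path is hypothetical; ray.init() is assumed to
# have been called.
def example_dump_timeline(path="/tmp/ray_timeline.json"):
    @ray.remote
    def traced():
        with ray.profile("preprocessing", extra_data={"step": "demo"}):
            time.sleep(0.01)

    ray.get(traced.remote())
    with open(path, "w") as f:
        json.dump(ray.timeline(), f)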
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only, delete_creating_tasks):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free(
[a, b, c],
local_only=local_only,
delete_creating_tasks=delete_creating_tasks)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run this with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run this with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object will have the same store with the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
    # Case 3: these runs test the delete_creating_tasks option for the objects.
(a, b, c) = run_one_test(actors, False, False)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() in task_table
(a, b, c) = run_one_test(actors, False, True)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() not in task_table
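# A minimal sketch (not part of the original test suite) of the intended use
# of ray.internal.free: eagerly evicting a large intermediate result once it
# is no longer needed, instead of waiting for the object store to evict it.
# After the call the ID should be treated as invalid. The names are
# hypothetical; ray.init() is assumed to have been called.
def example_free_large_intermediate():
    @ray.remote
    def make_block():
        return np.zeros(10**6)

    block = make_block.remote()
    ray.get(block)              # Consume the value ...
    ray.internal.free([block])  # ... then release the store memory early.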
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_basic remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
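# A minimal sketch (not part of the original test suite) of the main reason
# to use local mode: tasks and actor methods run synchronously in the driver
# process, so plain print/pdb debugging works, while exceptions are still
# deferred until ray.get as exercised above. The function name is
# hypothetical.
def example_debug_in_local_mode():
    ray.init(local_mode=True)

    @ray.remote
    def add(x, y):
        return x + y

    assert ray.get(add.remote(1, 2)) == 3
    ray.shutdown()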
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
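# A minimal sketch (not part of the original test suite) of how a GPU task
# normally consumes its assignment: ray.get_gpu_ids() lists the reserved
# devices and CUDA_VISIBLE_DEVICES is already narrowed to match, so ML
# frameworks only see the assigned GPUs. Assumes a cluster with at least one
# GPU and that ray.init() has been called.
def example_gpu_task():
    @ray.remote(num_gpus=1)
    def which_gpu():
        ids = ray.get_gpu_ids()
        assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
            str(i) for i in ids)
        return ids

    return ray.get(which_gpu.remote())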
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
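# A minimal sketch (not part of the original test suite) of the packing that
# fractional requests enable: four actors that each ask for 0.25 of a GPU fit
# on a single device. Assumes a cluster started with exactly one GPU; the
# class name is hypothetical.
def example_share_one_gpu():
    @ray.remote(num_gpus=0.25)
    class GpuWorker(object):
        def gpu(self):
            return ray.get_gpu_ids()

    workers = [GpuWorker.remote() for _ in range(4)]
    ids = ray.get([w.gpu.remote() for w in workers])
    # All four actors should report the same single GPU ID.
    assert len({tuple(i) for i in ids}) == 1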
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
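    # Create remote versions of f that each require a random subset of the
    # custom resources, plus one with no extra resources and one requiring
    # all of them.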
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too few for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.")
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
# All cluster resources should be utilized and
# cluster_resources must be empty
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
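    # Ray should only hand out GPU IDs from the restricted set above.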
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
# Each instance of g submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
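    # Keep submitting batches of tasks until every node has executed at least
    # minimum_count of them, or give up after num_attempts batches.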
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.tasks()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
error_message = ("The ray global state API cannot be used "
"before ray.init has been called.")
with pytest.raises(Exception, match=error_message):
ray.objects()
with pytest.raises(Exception, match=error_message):
ray.tasks()
with pytest.raises(Exception, match=error_message):
ray.nodes()
with pytest.raises(Exception, match=error_message):
ray.jobs()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.cluster_resources() == resources
assert ray.objects() == {}
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.tasks()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_unique_id_hex = ray.UniqueID.nil().hex()
nil_actor_id_hex = ray.ActorID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == []
assert task_spec["JobID"] == job_id.hex()
assert task_spec["FunctionID"] == nil_unique_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.tasks()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["JobID"] == job_id.hex()
assert task_spec["ReturnObjectIDs"] == [result_id]
assert task_table[task_id] == ray.tasks(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.objects()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.objects()
assert len(object_table) == 2
assert object_table[x_id] == ray.objects(x_id)
object_table_entry = ray.objects(result_id)
assert object_table[result_id] == object_table_entry
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
dummy_driver_id = ray.JobID.from_int(1)
ray.init(num_cpus=1, job_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID.from_random()
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
file_prefix = "test_object_id_properties"
# Make sure the ids are fork safe.
def write(index):
        obj_id_hex = ray.ObjectID.from_random().hex()
        with open("{}{}".format(file_prefix, index), "w") as fo:
            fo.write(obj_id_hex)
def read(index):
with open("{}{}".format(file_prefix, index), "r") as fi:
for line in fi:
return line
processes = [Process(target=write, args=(_, )) for _ in range(4)]
for process in processes:
process.start()
for process in processes:
process.join()
hexes = {read(i) for i in range(4)}
[os.remove("{}{}".format(file_prefix, i)) for i in range(4)]
assert len(hexes) == 4
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.WorkerID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID.from_random().hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.objects()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**8], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x)
# Remove the object from the object table to simulate Redis LRU eviction.
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.state.state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
# Now evict the object from the object store.
ray.put(x) # This should not crash.
def test_decorated_function(ray_start_regular):
def function_invocation_decorator(f):
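        # The decorator receives the packed (args, kwargs) of each remote
        # invocation; here it reverses the positional args, overrides d, and
        # passes the original kwargs through unchanged.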
def new_f(args, kwargs):
# Reverse the arguments.
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
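    # A post-get hook can transform the values returned by ray.get; this one
    # keeps only the positive values.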
def get_postprocessor(object_ids, values):
return [value for value in values if value > 0]
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
# This test checks that we can use actor and remote function definitions
# across multiple Ray sessions.
@ray.remote
def f():
pass
@ray.remote
class Actor(object):
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray and use the remote function and actor again.
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray again and make sure that these definitions can be exported from
# workers.
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
| 29.960501 | 79 | 0.60734 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import json
import logging
from multiprocessing import Process
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.tests.cluster_utils
import ray.tests.utils
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0))
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
def assert_equal(obj1, obj2):
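        # Deep equality check that handles numpy arrays, objects with
        # __dict__, dicts, lists, tuples, and named tuples.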
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), np.array([
["hi", u"hi"],
[1.3, long(1)] ])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_nested_functions(ray_start_regular):
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
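    # Test remote functions that are mutually recursive.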
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
class ClassA(object):
pass
lst = []
lst.append(lst)
a1 = ClassA()
a1.field = a1
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
d1 = {}
d1["key"] = d1
recursive_objects = [lst, a1, a2, a3, d1]
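    # Serializing a self-referential object is expected to raise an exception.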
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
@ray.remote
def f(x):
return x
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(ray_start_regular):
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
    # Check that an object of a class that has not been explicitly registered
    # can still be put and retrieved.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start_regular):
    @ray.remote
    def no_op():
        pass

    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
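    # The internal _remote API allows num_return_vals and the resource
    # requirements to be overridden on a per-call basis.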
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
assert ray.get(g._remote()) == []
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
@ray.remote
class Actor2(object):
def __init__(self):
pass
def method(self):
pass
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
a2 = Actor2._remote()
ray.get(a2.method._remote())
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_many_fractional_resources(shutdown_only):
ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
@ray.remote
def g():
return 1
@ray.remote
def f(block, accepted_resources):
true_resources = {
resource: value[0][1]
for resource, value in ray.get_resource_ids().items()
}
if block:
ray.get(g.remote())
return true_resources == accepted_resources
    # Check that the resources are assigned correctly.
result_ids = []
for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
resource_set = {"CPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
result_ids.append(
f._remote([False, resource_set], resources={"Custom": rand1}))
resource_set = {
"CPU": int(rand1 * 10000) / 10000,
"GPU": int(rand2 * 10000) / 10000,
"Custom": int(rand3 * 10000) / 10000
}
result_ids.append(
f._remote(
[False, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
result_ids.append(
f._remote(
[True, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
assert all(ray.get(result_ids))
# Check that the available resources at the end are the same as the
# beginning.
stop_time = time.time() + 10
correct_available_resources = False
while time.time() < stop_time:
if ray.available_resources() == {
"CPU": 2.0,
"GPU": 2.0,
"Custom": 2.0,
}:
correct_available_resources = True
break
if not correct_available_resources:
assert False, "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
    # Define a bunch of remote functions with the same name and make sure
    # that we don't accidentally call an older version of the function.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
    @ray.remote
    def g():
        return 2

    @ray.remote
    def g():
        return 3

    @ray.remote
    def g():
        return 4

    @ray.remote
    def g():
        return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
with pytest.raises(Exception):
ray.get(3)
# This test doesn't work in Python 2, likely because the plasma client isn't
# thread-safe. This needs to be fixed on the arrow side.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only, delete_creating_tasks):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free(
[a, b, c],
local_only=local_only,
delete_creating_tasks=delete_creating_tasks)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object will have the same store with the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
# Case 3: These cases test deleting the tasks that created the objects.
(a, b, c) = run_one_test(actors, False, False)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() in task_table
(a, b, c) = run_one_test(actors, False, True)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
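# In local mode (ray.init(local_mode=True)) remote functions and actor
# methods execute synchronously in the driver process, so the behavior
# below can be checked without a real cluster.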
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
# Make sure objects are immutable; this example is why we need to copy
# arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_basic remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
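# With 10 CPUs and num_cpus=1 per task, at most 10 copies of f run at once:
# 10 tasks of 0.5s finish in roughly 0.5s, while an 11th needs a second wave.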
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
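# Ray assigns GPUs to each worker and exposes them via ray.get_gpu_ids() and
# the CUDA_VISIBLE_DEVICES environment variable; the helper below checks that
# the two stay consistent with the requested num_gpus.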
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
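# Fractional GPU requests let several actors share one device: with
# num_gpus=0.5, the 6 Foo1 actors below should be packed two per GPU.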
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
# Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
# custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
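# The remote function below requests every resource in the cluster, so while
# it runs ray.available_resources() should eventually report nothing free.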
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.")
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
# All cluster resources should be utilized and
# cluster_resources must be empty
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
# Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
# Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
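# Repeatedly submit `total_tasks` copies of `remote_function` and succeed once
# every one of the `num_nodes` nodes has executed at least `minimum_count` of
# them, giving up after `num_attempts` rounds.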
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.tasks()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
error_message = ("The ray global state API cannot be used "
"before ray.init has been called.")
with pytest.raises(Exception, match=error_message):
ray.objects()
with pytest.raises(Exception, match=error_message):
ray.tasks()
with pytest.raises(Exception, match=error_message):
ray.nodes()
with pytest.raises(Exception, match=error_message):
ray.jobs()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.cluster_resources() == resources
assert ray.objects() == {}
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.tasks()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_unique_id_hex = ray.UniqueID.nil().hex()
nil_actor_id_hex = ray.ActorID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == []
assert task_spec["JobID"] == job_id.hex()
assert task_spec["FunctionID"] == nil_unique_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.tasks()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["JobID"] == job_id.hex()
assert task_spec["ReturnObjectIDs"] == [result_id]
assert task_table[task_id] == ray.tasks(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.objects()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.objects()
assert len(object_table) == 2
assert object_table[x_id] == ray.objects(x_id)
object_table_entry = ray.objects(result_id)
assert object_table[result_id] == object_table_entry
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
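# Context manager that temporarily redirects sys.stdout and sys.stderr into
# in-memory buffers and stores the captured text in the dict passed to it.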
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# These print statements should reach the driver even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
dummy_driver_id = ray.JobID.from_int(1)
ray.init(num_cpus=1, job_id=dummy_driver_id)
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID.from_random()
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
file_prefix = "test_object_id_properties"
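# Each child process writes one randomly generated ObjectID hex string to its
# own file; all four strings should be distinct even though the processes are
# forked from the same parent.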
def write(index):
id_hex = ray.ObjectID.from_random().hex()
with open("{}{}".format(file_prefix, index), "w") as fo:
fo.write(id_hex)
def read(index):
with open("{}{}".format(file_prefix, index), "r") as fi:
for line in fi:
return line
processes = [Process(target=write, args=(_, )) for _ in range(4)]
for process in processes:
process.start()
for process in processes:
process.join()
hexes = {read(i) for i in range(4)}
[os.remove("{}{}".format(file_prefix, i)) for i in range(4)]
assert len(hexes) == 4
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
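# Each result is a ~6e7-byte array, so storing a second one evicts the first
# from the 10**8-byte object store; waiting on the evicted ID should then
# trigger reconstruction of the object.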
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.WorkerID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID.from_random().hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
assert ray.get(echo.remote(message)) == message
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.objects()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**8], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
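# Put a large object, manually delete its entry from the Redis object table
# (mimicking LRU eviction of the metadata), and check that putting the same
# value again still works.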
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x)
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.state.state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
ray.put(x)
def test_decorated_function(ray_start_regular):
def function_invocation_decorator(f):
def new_f(args, kwargs):
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
def get_postprocessor(object_ids, values):
return [value for value in values if value > 0]
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
@ray.remote
def f():
pass
@ray.remote
class Actor(object):
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
| true | true |
1c4addfb448396b0009dee5101f84c6c363ce2cb | 51,768 | py | Python | pandas/core/indexes/interval.py | Bifaxin/pandas | 2ec7f2f279d770b286c9c7679ba7ad0e2f14dcbe | [
"BSD-3-Clause"
] | 2 | 2019-12-31T14:22:54.000Z | 2019-12-31T14:23:42.000Z | pandas/core/indexes/interval.py | Bifaxin/pandas | 2ec7f2f279d770b286c9c7679ba7ad0e2f14dcbe | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/interval.py | Bifaxin/pandas | 2ec7f2f279d770b286c9c7679ba7ad0e2f14dcbe | [
"BSD-3-Clause"
] | null | null | null | """ define the IntervalIndex """
from operator import le, lt
import textwrap
from typing import Any, Optional, Tuple, Union
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import Timedelta, Timestamp, lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
from pandas._typing import AnyArrayLike
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
default_pprint,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(
klass="IntervalIndex",
qualname="IntervalIndex",
target_klass="IntervalIndex or list of Intervals",
name=textwrap.dedent(
"""\
name : object, optional
Name to be stored in the index.
"""
),
)
)
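# Helpers returning the closest label strictly above/below a given label,
# used to translate open interval bounds into equivalent closed ones.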
def _get_next_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError(
"cannot determine next label for type {typ!r}".format(typ=type(label))
)
def _get_prev_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError(
"cannot determine next label for type {typ!r}".format(typ=type(label))
)
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't have
arguments and breaks __new__.
"""
return cls.from_arrays(**d)
class SetopCheck:
"""
This is called to decorate the set operations of IntervalIndex
to perform the type check in advance.
"""
def __init__(self, op_name):
self.op_name = op_name
def __call__(self, setop):
def func(intvidx_self, other, sort=False):
intvidx_self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
result = getattr(intvidx_self.astype(object), self.op_name)(other)
if self.op_name in ("difference",):
result = result.astype(intvidx_self.dtype)
return result
elif intvidx_self.closed != other.closed:
msg = (
"can only do set operations between two IntervalIndex "
"objects that are closed on the same side"
)
raise ValueError(msg)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = (
"can only do {op} between two IntervalIndex "
"objects that have compatible dtypes"
)
raise TypeError(msg.format(op=self.op_name))
return setop(intvidx_self, other, sort)
return func
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs["name"],
versionadded="0.20.0",
extra_attributes="is_overlapping\nvalues\n",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""
),
)
)
class IntervalIndex(IntervalMixin, Index):
_typ = "intervalindex"
_comparables = ["name"]
_attributes = ["name", "closed"]
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, data, closed=None, dtype=None, copy=False, name=None, verify_integrity=True
):
if name is None and hasattr(data, "name"):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
"""
Construct from an IntervalArray
Parameters
----------
array : IntervalArray
name : str
Attached as result.name
closed : Any
Ignored.
"""
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
breaks, closed=closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_arrays(
cls, left, right, closed="right", name=None, copy=False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
left, right, closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_tuples(cls, data, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
# --------------------------------------------------------------------
@Appender(_index_shared_docs["_shallow_copy"])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
"""Return a mask indicating if each value is NA"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
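# The IntervalTree engine only handles numeric data, so datetime-like
# endpoints are converted to their i8 representation first.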
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key):
"""
Return a boolean indicating whether this key is IN the index.
We *only* accept an Interval.
Parameters
----------
key : Interval
Returns
-------
boolean
"""
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
@Appender(
_interval_shared_docs["to_tuples"]
% dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')
""",
)
)
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalIndex as
an Index.
"""
return self._data._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalIndex as
an Index.
"""
return self._data._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
"""
return self._data._closed
@Appender(
_interval_shared_docs["set_closed"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> index = pd.interval_range(0, 3)
>>> index
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
>>> index.set_closed('both')
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
"""
),
)
)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
# return self._shallow_copy(closed=closed)
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalIndex.
"""
return self._data.length
@property
def size(self):
# Avoid materializing ndarray[Interval]
return self._data.size
@property
def itemsize(self):
msg = (
"IntervalIndex.itemsize is deprecated and will be removed in "
"a future version"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
# suppress the warning from the underlying left/right itemsize
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.left.itemsize + self.right.itemsize
def __len__(self) -> int:
return len(self.left)
@cache_readonly
def values(self):
"""
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._ndarray_values
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def __reduce__(self):
d = dict(left=self.left, right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs["copy"])
def copy(self, deep=False, name=None):
array = self._data
if deep:
array = array.copy()
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
with rewrite_exception("IntervalArray", self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super().astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
return self._data.dtype
@property
def inferred_type(self) -> str:
"""Return a string of the type inferred from the values"""
return "interval"
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
# so return the bytes here
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
@cache_readonly
def mid(self):
"""
Return the midpoint of each Interval in the IntervalIndex as an Index.
"""
return self._data.mid
@cache_readonly
def is_monotonic(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self.is_monotonic_increasing
@cache_readonly
def is_monotonic_increasing(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self._engine.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
Return True if the IntervalIndex is monotonic decreasing (only equal or
decreasing values), else False
"""
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
"""
Return True if the IntervalIndex contains unique elements, else False
"""
left = self.left
right = self.right
if self.isna().sum() > 1:
return False
if left.is_unique or right.is_unique:
return True
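# Fall back to a pairwise check: only intervals whose left endpoint is
# duplicated can collide, so restrict the scan to those positions.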
seen_pairs = set()
check_idx = np.where(left.duplicated(keep=False))[0]
for idx in check_idx:
pair = (left[idx], right[idx])
if pair in seen_pairs:
return False
seen_pairs.add(pair)
return True
@cache_readonly
@Appender(_interval_shared_docs["is_non_overlapping_monotonic"] % _index_doc_kwargs)
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@property
def is_overlapping(self):
"""
Return True if the IntervalIndex has overlapping intervals, else False.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Returns
-------
bool
Boolean indicating if the IntervalIndex has overlapping intervals.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
IntervalIndex.overlaps : Check an IntervalIndex elementwise for
overlaps.
Examples
--------
>>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
>>> index
IntervalIndex([(0, 2], (1, 3], (4, 5]],
closed='right',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that share closed endpoints overlap:
>>> index = pd.interval_range(0, 3, closed='both')
>>> index
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that only have an open endpoint in common do not overlap:
>>> index = pd.interval_range(0, 3, closed='left')
>>> index
IntervalIndex([[0, 1), [1, 2), [2, 3)],
closed='left',
dtype='interval[int64]')
>>> index.is_overlapping
False
"""
# GH 23309
return self._engine.is_overlapping
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
if kind == "iloc":
return super()._convert_scalar_indexer(key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
"""
we need to cast the key, which could be a scalar
or an array-like to the type of our subtype
"""
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype("float64")
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if self.is_overlapping and len(indexer):
raise ValueError("cannot reindex from an overlapping axis")
def _needs_i8_conversion(self, key):
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
Interval-like requires conversion if its endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
Parameters
----------
key : scalar or Index-like
The key that should be checked for i8 conversion
Returns
-------
boolean
"""
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
def _maybe_convert_i8(self, key):
"""
Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
key: scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Int64Index if converted list-like.
"""
original = key
if is_list_like(key):
key = ensure_index(key)
if not self._needs_i8_conversion(key):
return original
scalar = is_scalar(key)
if is_interval_dtype(key) or isinstance(key, Interval):
# convert left/right and reconstruct
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
# convert NaT from its i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
subtype = self.dtype.subtype
msg = (
"Cannot index an IntervalIndex of subtype {subtype} with "
"values of dtype {other}"
)
if not is_dtype_equal(subtype, key_dtype):
raise ValueError(msg.format(subtype=subtype, other=key_dtype))
return key_i8
def _check_method(self, method):
if method is None:
return
if method in ["bfill", "backfill", "pad", "ffill", "nearest"]:
msg = "method {method} not yet implemented for IntervalIndex"
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if "
"bounds are non-overlapping and all monotonic "
"increasing or decreasing"
)
if isinstance(label, IntervalMixin):
msg = "Interval objects are not currently supported"
raise NotImplementedError(msg)
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and not self.left.is_monotonic_increasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, "left", exclude_label=key.open_left
)
stop = self._searchsorted_monotonic(
key.right, "right", exclude_label=key.open_right
)
elif isinstance(key, slice):
# slice
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, "left")
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, "right")
else:
# scalar or index-like
start = self._searchsorted_monotonic(key, "left")
stop = self._searchsorted_monotonic(key, "right")
return start, stop
def get_loc(
self, key: Any, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
You can also supply a point inside an interval.
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
>>> overlapping_index.get_loc(0.5)
array([ True, False, True])
Only exact matches will be returned if an interval is provided.
>>> index.get_loc(pd.Interval(0, 1))
0
"""
self._check_method(method)
# list-like are invalid labels for II but in some cases may work, e.g
# single element array of comparable type, so guard against them early
if is_list_like(key):
raise KeyError(key)
if isinstance(key, Interval):
if self.closed != key.closed:
raise KeyError(key)
mask = (self.left == key.left) & (self.right == key.right)
else:
# assume scalar
op_left = le if self.closed_left else lt
op_right = le if self.closed_right else lt
try:
mask = op_left(self.left, key) & op_right(key, self.right)
except TypeError:
# scalar is not comparable to II subtype --> invalid label
raise KeyError(key)
matches = mask.sum()
if matches == 0:
raise KeyError(key)
elif matches == 1:
return mask.argmax()
return lib.maybe_booleans_to_slice(mask.view("u1"))
@Substitution(
**dict(
_index_doc_kwargs,
**{
"raises_section": textwrap.dedent(
"""
Raises
------
NotImplementedError
If any method argument other than the default of
None is specified as these are not yet implemented.
"""
)
}
)
)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(
self,
target: AnyArrayLike,
method: Optional[str] = None,
limit: Optional[int] = None,
tolerance: Optional[Any] = None,
) -> np.ndarray:
self._check_method(method)
if self.is_overlapping:
msg = (
"cannot handle overlapping indices; use "
"IntervalIndex.get_indexer_non_unique"
)
raise InvalidIndexError(msg)
target_as_index = ensure_index(target)
if isinstance(target_as_index, IntervalIndex):
# equal indexes -> 1:1 positional match
if self.equals(target_as_index):
return np.arange(len(self), dtype="intp")
# different closed or incompatible subtype -> no matches
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
return np.repeat(np.intp(-1), len(target_as_index))
# non-overlapping -> at most one match per interval in target_as_index
# want exact matches -> need both left/right to match, so defer to
# left/right get_indexer, compare elementwise, equality -> match
left_indexer = self.left.get_indexer(target_as_index.left)
right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
elif not is_object_dtype(target_as_index):
# homogeneous scalar index: use IntervalTree
target_as_index = self._maybe_convert_i8(target_as_index)
indexer = self._engine.get_indexer(target_as_index.values)
else:
# heterogeneous scalar index: defer elementwise to get_loc
# (non-overlapping so get_loc guarantees scalar or KeyError)
indexer = []
for key in target_as_index:
try:
loc = self.get_loc(key)
except KeyError:
loc = -1
indexer.append(loc)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(
self, target: AnyArrayLike
) -> Tuple[np.ndarray, np.ndarray]:
target_as_index = ensure_index(target)
# check that target_as_index IntervalIndex is compatible
if isinstance(target_as_index, IntervalIndex):
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
# different closed or incompatible subtype -> no matches
return (
np.repeat(-1, len(target_as_index)),
np.arange(len(target_as_index)),
)
if is_object_dtype(target_as_index) or isinstance(
target_as_index, IntervalIndex
):
# target_as_index might contain intervals: defer elementwise to get_loc
indexer, missing = [], []
for i, key in enumerate(target_as_index):
try:
locs = self.get_loc(key)
if isinstance(locs, slice):
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
locs = np.array(locs, ndmin=1)
except KeyError:
missing.append(i)
locs = np.array([-1])
indexer.append(locs)
indexer = np.concatenate(indexer)
else:
target_as_index = self._maybe_convert_i8(target_as_index)
indexer, missing = self._engine.get_indexer_non_unique(
target_as_index.values
)
return ensure_platform_int(indexer), ensure_platform_int(missing)
def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
"""
Guaranteed return of an indexer even when overlapping.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
numpy.ndarray
List of indices.
"""
if self.is_overlapping:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
@Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
def get_value(self, series: ABCSeries, key: Any) -> Any:
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
if self.is_overlapping:
loc, missing = self.get_indexer_non_unique(key)
if len(missing):
raise KeyError
else:
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
loc = self._convert_slice_indexer(key, kind="getitem")
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs["where"])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
"""
Return a new IntervalIndex with passed location(s) deleted
Returns
-------
new_index : IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
"""
Return a new IntervalIndex inserting new item at location. Follows
Python list.append semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : object
Returns
-------
new_index : IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError(
"inserted item must be closed on the same side as the index"
)
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError(
"can only insert Interval objects and NA into an IntervalIndex"
)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _concat_same_dtype(self, to_concat, name):
"""
assert that we all have the same .closed;
we allow a 0-len index here as well
"""
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = (
"can only append two IntervalIndex objects "
"that are closed on the same side"
)
raise ValueError(msg)
return super()._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
result = self._data.take(
indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs
)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
# scalar
return result
# --------------------------------------------------------------------
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
# GH 28210: use base method but with different default na_rep
return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = "[{first}]".format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = "[{first}, {last}]".format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = "[{head} ... {tail}]".format(
head=", ".join(head), tail=", ".join(tail)
)
else:
tail = [formatter(x) for x in self]
summary = "[{tail}]".format(tail=", ".join(tail))
return summary + "," + self._format_space()
def _format_attrs(self):
attrs = [("closed", repr(self.closed))]
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
attrs.append(("dtype", "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = " " * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
# --------------------------------------------------------------------
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
"""
Determines if two IntervalIndex objects contain the same elements
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, "values", other))
return (
self.left.equals(other.left)
and self.right.equals(other.right)
and self.closed == other.closed
)
@Appender(
_interval_shared_docs["contains"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
>>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
IntervalIndex([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
>>> intervals.contains(0.5)
array([ True, False, False])
"""
),
)
)
def contains(self, other):
return self._data.contains(other)
@Appender(
_interval_shared_docs["overlaps"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
>>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
IntervalIndex([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def overlaps(self, other):
return self._data.overlaps(other)
@Appender(_index_shared_docs["intersection"])
@SetopCheck(op_name="intersection")
def intersection(
self, other: "IntervalIndex", sort: bool = False
) -> "IntervalIndex":
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
# Swap other/self if other is unique and self does not have
# multiple NaNs
taken = other._intersection_unique(self)
else:
# duplicates
taken = self._intersection_non_unique(other)
if sort is None:
taken = taken.sort_values()
return taken
def _intersection_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does not have any common endpoint,
no matter left or right.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
taken : IntervalIndex
"""
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
match = (lindexer == rindexer) & (lindexer != -1)
indexer = lindexer.take(match.nonzero()[0])
return self.take(indexer)
def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does have some common endpoints,
on either side.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
taken : IntervalIndex
"""
mask = np.zeros(len(self), dtype=bool)
if self.hasnans and other.hasnans:
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
other_tups = set(zip(other.left, other.right))
for i, tup in enumerate(zip(self.left, self.right)):
if tup in other_tups:
mask[i] = True
return self[mask]
def _setop(op_name: str, sort=None):
@SetopCheck(op_name=op_name)
def func(self, other, sort=sort):
result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)
result_name = get_op_result_name(self, other)
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed, name=result_name)
return func
@property
def is_all_dates(self) -> bool:
"""
This is False even when left/right contain datetime-like objects,
as the check is done on the Interval itself
"""
return False
union = _setop("union")
difference = _setop("difference")
symmetric_difference = _setop("symmetric_difference")
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any(
[
is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None,
]
)
def _is_type_compatible(a, b):
"""helper for interval_range to check type compat of start/end/freq"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return (
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
or (is_td_compat(a) and is_td_compat(b))
or com.any_none(a, b)
)
def interval_range(
start=None, end=None, periods=None, freq=None, name=None, closed="right"
):
"""
Return a fixed frequency IntervalIndex.
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals.
end : numeric or datetime-like, default None
Right bound for generating intervals.
periods : int, default None
Number of periods to generate.
freq : numeric, str, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : str, default None
Name of the resulting IntervalIndex.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` is supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com.any_none(periods, start, end):
freq = 1 if is_number(endpoint) else "D"
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
if not _is_valid_endpoint(start):
msg = "start must be numeric or datetime-like, got {start}"
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = "end must be numeric or datetime-like, got {end}"
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = "periods must be a number, got {periods}"
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError(
"freq must be numeric or convertible to "
"DateOffset, got {freq}".format(freq=freq)
)
# verify type compatibility
if not all(
[
_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq),
]
):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com.all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, "int64")
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
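# Minimal usage sketch of the lookup semantics implemented above. Illustrative
# only and guarded so it never runs on import; the values noted in the trailing
# comments are what get_loc/get_indexer/is_overlapping imply for the default
# closed='right' intervals, assuming pandas itself is importable.
if __name__ == "__main__":
    import pandas as pd

    idx = pd.interval_range(start=0, end=4)      # (0, 1], (1, 2], (2, 3], (3, 4]
    print(idx.is_overlapping)                    # False: neighbours meet only at an open/closed endpoint
    print(idx.get_loc(2.5))                      # 2, since 2.5 falls inside (2, 3]
    print(idx.get_indexer([0.5, 1.5, 9.0]))      # [0, 1, -1]; 9.0 matches no interval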
| 33.036375 | 95 | 0.583527 | from operator import le, lt
import textwrap
from typing import Any, Optional, Tuple, Union
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import Timedelta, Timestamp, lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
from pandas._typing import AnyArrayLike
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
default_pprint,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(
klass="IntervalIndex",
qualname="IntervalIndex",
target_klass="IntervalIndex or list of Intervals",
name=textwrap.dedent(
"""\
name : object, optional
Name to be stored in the index.
"""
),
)
)
def _get_next_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError(
"cannot determine next label for type {typ!r}".format(typ=type(label))
)
def _get_prev_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError(
"cannot determine next label for type {typ!r}".format(typ=type(label))
)
def _get_interval_closed_bounds(interval):
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
return cls.from_arrays(**d)
class SetopCheck:
def __init__(self, op_name):
self.op_name = op_name
def __call__(self, setop):
def func(intvidx_self, other, sort=False):
intvidx_self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
result = getattr(intvidx_self.astype(object), self.op_name)(other)
if self.op_name in ("difference",):
result = result.astype(intvidx_self.dtype)
return result
elif intvidx_self.closed != other.closed:
msg = (
"can only do set operations between two IntervalIndex "
"objects that are closed on the same side"
)
raise ValueError(msg)
subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = (
"can only do {op} between two IntervalIndex "
"objects that have compatible dtypes"
)
raise TypeError(msg.format(op=self.op_name))
return setop(intvidx_self, other, sort)
return func
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs["name"],
versionadded="0.20.0",
extra_attributes="is_overlapping\nvalues\n",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""
),
)
)
class IntervalIndex(IntervalMixin, Index):
_typ = "intervalindex"
_comparables = ["name"]
_attributes = ["name", "closed"]
_defer_to_indexing = True
_mask = None
def __new__(
cls, data, closed=None, dtype=None, copy=False, name=None, verify_integrity=True
):
if name is None and hasattr(data, "name"):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
breaks, closed=closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_arrays(
cls, left, right, closed="right", name=None, copy=False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
left, right, closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_tuples(cls, data, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
@Appender(_index_shared_docs["_shallow_copy"])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key):
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
@Appender(
_interval_shared_docs["to_tuples"]
% dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')
""",
)
)
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
@property
def left(self):
return self._data._left
@property
def right(self):
return self._data._right
@property
def closed(self):
return self._data._closed
@Appender(
_interval_shared_docs["set_closed"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> index = pd.interval_range(0, 3)
>>> index
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
>>> index.set_closed('both')
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
"""
),
)
)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
return self._data.length
@property
def size(self):
return self._data.size
@property
def itemsize(self):
msg = (
"IntervalIndex.itemsize is deprecated and will be removed in "
"a future version"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return self.left.itemsize + self.right.itemsize
def __len__(self) -> int:
return len(self.left)
@cache_readonly
def values(self):
return self._data
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
return self._ndarray_values
def __array_wrap__(self, result, context=None):
return result
def __reduce__(self):
d = dict(left=self.left, right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs["copy"])
def copy(self, deep=False, name=None):
array = self._data
if deep:
array = array.copy()
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
with rewrite_exception("IntervalArray", self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super().astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
return self._data.dtype
@property
def inferred_type(self) -> str:
return "interval"
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
@cache_readonly
def mid(self):
return self._data.mid
@cache_readonly
def is_monotonic(self):
return self.is_monotonic_increasing
@cache_readonly
def is_monotonic_increasing(self):
return self._engine.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
left = self.left
right = self.right
if self.isna().sum() > 1:
return False
if left.is_unique or right.is_unique:
return True
seen_pairs = set()
check_idx = np.where(left.duplicated(keep=False))[0]
for idx in check_idx:
pair = (left[idx], right[idx])
if pair in seen_pairs:
return False
seen_pairs.add(pair)
return True
@cache_readonly
@Appender(_interval_shared_docs["is_non_overlapping_monotonic"] % _index_doc_kwargs)
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@property
def is_overlapping(self):
return self._engine.is_overlapping
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
if kind == "iloc":
return super()._convert_scalar_indexer(key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
locs = self.get_indexer_for(keyarr)
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype("float64")
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _can_reindex(self, indexer: np.ndarray) -> None:
if self.is_overlapping and len(indexer):
raise ValueError("cannot reindex from an overlapping axis")
def _needs_i8_conversion(self, key):
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
def _maybe_convert_i8(self, key):
original = key
if is_list_like(key):
key = ensure_index(key)
if not self._needs_i8_conversion(key):
return original
scalar = is_scalar(key)
if is_interval_dtype(key) or isinstance(key, Interval):
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
else:
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
key_i8 = key_i8.where(~key._isnan)
subtype = self.dtype.subtype
msg = (
"Cannot index an IntervalIndex of subtype {subtype} with "
"values of dtype {other}"
)
if not is_dtype_equal(subtype, key_dtype):
raise ValueError(msg.format(subtype=subtype, other=key_dtype))
return key_i8
def _check_method(self, method):
if method is None:
return
if method in ["bfill", "backfill", "pad", "ffill", "nearest"]:
msg = "method {method} not yet implemented for IntervalIndex"
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if "
"bounds are non-overlapping and all monotonic "
"increasing or decreasing"
)
if isinstance(label, IntervalMixin):
msg = "Interval objects are not currently supported"
raise NotImplementedError(msg)
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and not self.left.is_monotonic_increasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, "left", exclude_label=key.open_left
)
stop = self._searchsorted_monotonic(
key.right, "right", exclude_label=key.open_right
)
elif isinstance(key, slice):
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, "left")
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, "right")
else:
start = self._searchsorted_monotonic(key, "left")
stop = self._searchsorted_monotonic(key, "right")
return start, stop
def get_loc(
self, key: Any, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
self._check_method(method)
if is_list_like(key):
raise KeyError(key)
if isinstance(key, Interval):
if self.closed != key.closed:
raise KeyError(key)
mask = (self.left == key.left) & (self.right == key.right)
else:
op_left = le if self.closed_left else lt
op_right = le if self.closed_right else lt
try:
mask = op_left(self.left, key) & op_right(key, self.right)
except TypeError:
raise KeyError(key)
matches = mask.sum()
if matches == 0:
raise KeyError(key)
elif matches == 1:
return mask.argmax()
return lib.maybe_booleans_to_slice(mask.view("u1"))
@Substitution(
**dict(
_index_doc_kwargs,
**{
"raises_section": textwrap.dedent(
"""
Raises
------
NotImplementedError
If any method argument other than the default of
None is specified as these are not yet implemented.
"""
)
}
)
)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(
self,
target: AnyArrayLike,
method: Optional[str] = None,
limit: Optional[int] = None,
tolerance: Optional[Any] = None,
) -> np.ndarray:
self._check_method(method)
if self.is_overlapping:
msg = (
"cannot handle overlapping indices; use "
"IntervalIndex.get_indexer_non_unique"
)
raise InvalidIndexError(msg)
target_as_index = ensure_index(target)
if isinstance(target_as_index, IntervalIndex):
if self.equals(target_as_index):
return np.arange(len(self), dtype="intp")
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
return np.repeat(np.intp(-1), len(target_as_index))
left_indexer = self.left.get_indexer(target_as_index.left)
right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
elif not is_object_dtype(target_as_index):
target_as_index = self._maybe_convert_i8(target_as_index)
indexer = self._engine.get_indexer(target_as_index.values)
else:
indexer = []
for key in target_as_index:
try:
loc = self.get_loc(key)
except KeyError:
loc = -1
indexer.append(loc)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(
self, target: AnyArrayLike
) -> Tuple[np.ndarray, np.ndarray]:
target_as_index = ensure_index(target)
if isinstance(target_as_index, IntervalIndex):
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
return (
np.repeat(-1, len(target_as_index)),
np.arange(len(target_as_index)),
)
if is_object_dtype(target_as_index) or isinstance(
target_as_index, IntervalIndex
):
indexer, missing = [], []
for i, key in enumerate(target_as_index):
try:
locs = self.get_loc(key)
if isinstance(locs, slice):
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
locs = np.array(locs, ndmin=1)
except KeyError:
missing.append(i)
locs = np.array([-1])
indexer.append(locs)
indexer = np.concatenate(indexer)
else:
target_as_index = self._maybe_convert_i8(target_as_index)
indexer, missing = self._engine.get_indexer_non_unique(
target_as_index.values
)
return ensure_platform_int(indexer), ensure_platform_int(missing)
def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
if self.is_overlapping:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
@Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
def get_value(self, series: ABCSeries, key: Any) -> Any:
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
if self.is_overlapping:
loc, missing = self.get_indexer_non_unique(key)
if len(missing):
raise KeyError
else:
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
loc = self._convert_slice_indexer(key, kind="getitem")
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs["where"])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError(
"inserted item must be closed on the same side as the index"
)
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
left_insert = right_insert = item
else:
raise ValueError(
"can only insert Interval objects and NA into an IntervalIndex"
)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _concat_same_dtype(self, to_concat, name):
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = (
"can only append two IntervalIndex objects "
"that are closed on the same side"
)
raise ValueError(msg)
return super()._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
result = self._data.take(
indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs
)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
return result
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
def _format_data(self, name=None):
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = "[{first}]".format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = "[{first}, {last}]".format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = "[{head} ... {tail}]".format(
head=", ".join(head), tail=", ".join(tail)
)
else:
tail = [formatter(x) for x in self]
summary = "[{tail}]".format(tail=", ".join(tail))
return summary + "," + self._format_space()
def _format_attrs(self):
attrs = [("closed", repr(self.closed))]
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
attrs.append(("dtype", "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = " " * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
if self.is_(other):
return True
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, "values", other))
return (
self.left.equals(other.left)
and self.right.equals(other.right)
and self.closed == other.closed
)
@Appender(
_interval_shared_docs["contains"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
>>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
IntervalIndex([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
>>> intervals.contains(0.5)
array([ True, False, False])
"""
),
)
)
def contains(self, other):
return self._data.contains(other)
@Appender(
_interval_shared_docs["overlaps"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
>>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
IntervalIndex([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def overlaps(self, other):
return self._data.overlaps(other)
@Appender(_index_shared_docs["intersection"])
@SetopCheck(op_name="intersection")
def intersection(
self, other: "IntervalIndex", sort: bool = False
) -> "IntervalIndex":
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
taken = other._intersection_unique(self)
else:
taken = self._intersection_non_unique(other)
if sort is None:
taken = taken.sort_values()
return taken
def _intersection_unique(self, other: "IntervalIndex") -> "IntervalIndex":
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
match = (lindexer == rindexer) & (lindexer != -1)
indexer = lindexer.take(match.nonzero()[0])
return self.take(indexer)
def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
mask = np.zeros(len(self), dtype=bool)
if self.hasnans and other.hasnans:
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
other_tups = set(zip(other.left, other.right))
for i, tup in enumerate(zip(self.left, self.right)):
if tup in other_tups:
mask[i] = True
return self[mask]
def _setop(op_name: str, sort=None):
@SetopCheck(op_name=op_name)
def func(self, other, sort=sort):
result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)
result_name = get_op_result_name(self, other)
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed, name=result_name)
return func
@property
def is_all_dates(self) -> bool:
return False
union = _setop("union")
difference = _setop("difference")
symmetric_difference = _setop("symmetric_difference")
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
return any(
[
is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None,
]
)
def _is_type_compatible(a, b):
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return (
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
or (is_td_compat(a) and is_td_compat(b))
or com.any_none(a, b)
)
def interval_range(
start=None, end=None, periods=None, freq=None, name=None, closed="right"
):
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com.any_none(periods, start, end):
freq = 1 if is_number(endpoint) else "D"
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
if not _is_valid_endpoint(start):
msg = "start must be numeric or datetime-like, got {start}"
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = "end must be numeric or datetime-like, got {end}"
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = "periods must be a number, got {periods}"
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError(
"freq must be numeric or convertible to "
"DateOffset, got {freq}".format(freq=freq)
)
if not all(
[
_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq),
]
):
raise TypeError("start, end, freq need to be type compatible")
if periods is not None:
periods += 1
if is_number(endpoint):
if com.all_not_none(start, end, freq):
end -= (end - start) % freq
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com.not_none(start, end, freq)):
breaks = maybe_downcast_to_dtype(breaks, "int64")
else:
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| true | true |
1c4ade2d78bc881c4de7dad09da7fcbf9edec689 | 2,851 | py | Python | auto_generated_scripts/combining_kernels_by_summation.py | myforkmachine/pyprobml | a750b6e33e849ca75300fec1b9ee4b61def80c52 | [
"MIT"
] | null | null | null | auto_generated_scripts/combining_kernels_by_summation.py | myforkmachine/pyprobml | a750b6e33e849ca75300fec1b9ee4b61def80c52 | [
"MIT"
] | null | null | null | auto_generated_scripts/combining_kernels_by_summation.py | myforkmachine/pyprobml | a750b6e33e849ca75300fec1b9ee4b61def80c52 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
try:
import jax
except:
get_ipython().run_line_magic('pip', 'install jax jaxlib')
import jax
import jax.numpy as jnp
try:
import matplotlib.pyplot as plt
except:
get_ipython().run_line_magic('pip', 'install matplotlib')
import matplotlib.pyplot as plt
try:
import seaborn as sns
except:
get_ipython().run_line_magic('pip', 'install seaborn')
import seaborn as sns
try:
import tinygp
except ModuleNotFoundError:
get_ipython().run_line_magic('pip', 'install -qqq tinygp')
import tinygp
kernels = tinygp.kernels
from tinygp import GaussianProcess
# In[2]:
import os
dev_mode = "DEV_MODE" in os.environ
if dev_mode:
import sys
sys.path.append("scripts")
from plot_utils import latexify, savefig
latexify(width_scale_factor=4, height_scale_factor=1.5 / 2)
# In[3]:
def plot_sample(data, save_name):
if dev_mode:
fig, ax = plt.subplots(2, 1)
else:
fig, ax = plt.subplots(2, 1, figsize=(6.4, 6))
# Plot kernel
kernel = data["kernel1"] + data["kernel2"]
x2 = jnp.array([1.0]).reshape(-1, 1)
kernel_values = kernel(x, x2)
ax[0].plot(x.ravel(), kernel_values.ravel(), color="k")
# Plot samples
gp = GaussianProcess(kernel, x)
samples = gp.sample(key, (2,))
for sample in samples:
ax[1].plot(x, sample)
ax[0].set_title(data["title"])
ax[1].set_xlabel(data["xlabel"])
for axes in ax:
axes.set_xticks([])
ax[0].set_xlabel("$x$ (with $x'=1$)")
plt.tight_layout()
sns.despine()
if dev_mode and len(save_name) > 0:
savefig(save_name)
return fig, ax
x = jnp.arange(-3.0, 5.1, 0.1).reshape(-1, 1)
N = len(x)
key = jax.random.PRNGKey(4)
fig, ax = plot_sample(
{
"kernel1": kernels.Polynomial(order=1),
"kernel2": kernels.ExpSineSquared(scale=1.5, gamma=1.0),
"title": "Lin + Per",
"xlabel": "periodic plus trend",
},
save_name="kernel_sum_lin_per_latexified.pdf",
)
fig, ax = plot_sample(
{
"kernel1": kernels.ExpSquared(scale=1.0),
"kernel2": kernels.ExpSineSquared(scale=2.0, gamma=1.0),
"title": "SE + Per",
"xlabel": "periodic plus noise",
},
save_name="kernel_sum_se_per_latexified.pdf",
)
fig, ax = plot_sample(
{
"kernel1": kernels.ExpSquared(scale=1.0),
"kernel2": kernels.Polynomial(order=1),
"title": "SE + Lin",
"xlabel": "linear plus variation",
},
save_name="kernel_sum_lin_se_latexified.pdf",
)
fig, ax = plot_sample(
{
"kernel1": kernels.ExpSquared(scale=5.0),
"kernel2": kernels.ExpSquared(scale=0.5),
"title": "SE (long) + SE (short)}",
"xlabel": "slow & fast variation",
},
save_name="kernel_sum_se_se_latexified.pdf",
)
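# Quick sanity check (minimal illustrative sketch reusing the kernels and x
# defined above): a tinygp sum kernel evaluates to the elementwise sum of its
# component kernels, which is the property the "plus" plots above rely on.
k1_check = kernels.ExpSquared(scale=1.0)
k2_check = kernels.Polynomial(order=1)
x2_check = jnp.array([1.0]).reshape(-1, 1)
assert jnp.allclose(
    (k1_check + k2_check)(x, x2_check),
    k1_check(x, x2_check) + k2_check(x, x2_check),
)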
1c4adea1f2919d3e80db315f58aa9cebace16e2d | 2,724 | py | Python | django/db/migrations/recorder.py | bak1an/django | ["PSF-2.0", "BSD-3-Clause"]
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder:
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
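# --- Added usage sketch (not part of the original module) ---
# A minimal illustration of how this recorder is typically driven, assuming an
# already-configured Django database connection; the app and migration names
# below are placeholders chosen for the example.
#
#   from django.db import connection
#   recorder = MigrationRecorder(connection)
#   recorder.ensure_schema()                          # creates django_migrations if missing
#   recorder.record_applied("myapp", "0001_initial")
#   assert ("myapp", "0001_initial") in recorder.applied_migrations()
#   recorder.record_unapplied("myapp", "0001_initial")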
1c4adf74fe56fbacc2ac8f5384028ec5a33fdee6 | 1,138 | py | Python | flocker/node/test/test_testtools.py | wallnerryan/flocker-profiles | ["Apache-2.0"]
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for ``flocker.node.testtools``.
"""
from zope.interface import implementer
from twisted.internet.defer import succeed
from .. import sequentially
from ..testtools import (
DummyDeployer, ControllableDeployer, ideployer_tests_factory,
)
from ...control import IClusterStateChange
@implementer(IClusterStateChange)
class DummyClusterStateChange(object):
"""
A non-implementation of ``IClusterStateChange``.
"""
def update_cluster_state(self, cluster_state):
return cluster_state
class DummyDeployerIDeployerTests(
ideployer_tests_factory(lambda case: DummyDeployer())
):
"""
Tests for the ``IDeployer`` implementation of ``DummyDeployer``.
"""
class ControllableDeployerIDeployerTests(
ideployer_tests_factory(
lambda case: ControllableDeployer(
hostname=u"10.0.0.1",
local_states=[succeed(DummyClusterStateChange())],
calculated_actions=[sequentially(changes=[])],
)
)
):
"""
    Tests for the ``IDeployer`` implementation of ``ControllableDeployer``.
"""
1c4ae2fc6e24edd3378f495b4123ca7ea576606a | 467 | py | Python | src/python/pants/option/errors.py | hythloday/pants | ["Apache-2.0"]
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
class RegistrationError(Exception):
"""An error at option registration time."""
pass
class ParseError(Exception):
"""An error at flag parsing time."""
pass
1c4ae411c24d29c5748c1368f010a592d04efd84 | 2,532 | py | Python | test_database.py | rr2674/item_catalog | ["MIT"]
#!/usr/bin/env python2
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
engine = create_engine('sqlite:///assignment4.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
#q = session.query(Item).all()
#print [i.serialize for i in q]
#sys.exit()
#q = session.query(Item).order_by(asc(Item.name)).all()
#print [i.serialize for i in q]
#sys.exit()
users = [
('Bob Whatever', '', '[email protected]'),
('Billy Sam', '', '[email protected]')
]
for name, picture, email in users:
session.add(User(username = name, picture = picture, email = email))
session.commit()
q = session.query(User).filter_by(email='[email protected]').one()
#q = session.query(User).filter_by(email='[email protected]').first()
user1 = q.id
q = session.query(User).filter_by(email='[email protected]').one()
user2 = q.id
categories_tuples = [
('Soccer', [('Shinguards', 'blahblah1', user1), ('Cleats', 'blahblahb2', user2), ('Soccer Ball', 'foofoo', user1)]),
('Basketball', [('Headband', 'blahblah3', user1), ('Basketball Shoes', 'blahblah4', user1), ('Basketball', 'blahblah5', user2)]),
#('Baseball', ['Bat', 'Glove', 'Ball']),
#('Football', ['Helmet', 'Jersey', 'Shoulder Pads'])
]
for category, items in categories_tuples:
c = Category(name=category)
session.add(c)
session.commit()
q = session.query(Category).filter_by(name=category).one()
print 'category_id: {}'.format(q.id)
for n, d, u in items:
session.add(Item(name=n, description=d, category_id=q.id, user_id=u))
#session.add(Item(name=n, description=d, user=u))
#session.add(Item(name=n, description=d, category=q.id, user=u))
session.commit()
q = session.query(Item).all()
print [i.serialize for i in q]
q = session.query(Item).order_by(asc(Item.name))
print [i.serialize for i in q]
q = session.query(Category).all()
print [i.serialize for i in q]
q = session.query(Category).filter_by(id=1).one()
print 'category 1: {}'.format(q.name)
category_name = 'Soccer'
q = session.query(Category).filter_by(name=category_name).one()
print 'category {}: id {}'.format(q.name, q.id)
q = session.query(Item).filter_by(category_id=q.id).all()
print ' Items for category {}: {}'.format(category_name, [i.serialize for i in q])
category_id = 10
try:
q = session.query(Category).filter_by(id=category_id).one()
print 'category 10: {}'.format(q.name)
except:
    print 'category id: {} does not exist'.format(category_id)
1c4ae493f041362e778734f35d7f5208e5ae65ed | 5,189 | py | Python | tensorflow/python/distribute/keras_metrics_test.py | uve/tensorflow | ["Apache-2.0"]
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.keras import metrics
from tensorflow.python.ops import math_ops
def _labeled_dataset_fn():
# First four batches of x: labels, predictions -> (labels == predictions)
# 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False
# 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False
# 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False
# 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True
return dataset_ops.Dataset.range(1000).map(
lambda x: {"labels": x % 5, "predictions": x % 3}).batch(
4, drop_remainder=True)
def _boolean_dataset_fn():
# First four batches of labels, predictions: {TP, FP, TN, FN}
# with a threshold of 0.5:
# T, T -> TP; F, T -> FP; T, F -> FN
# F, F -> TN; T, T -> TP; F, T -> FP
# T, F -> FN; F, F -> TN; T, T -> TP
# F, T -> FP; T, F -> FN; F, F -> TN
return dataset_ops.Dataset.from_tensor_slices({
"labels": [True, False, True, False],
"predictions": [True, True, False, False]}).repeat().batch(
3, drop_remainder=True)
def _threshold_dataset_fn():
# First four batches of labels, predictions: {TP, FP, TN, FN}
# with a threshold of 0.5:
# True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN
# False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP
# True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP
# False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN
return dataset_ops.Dataset.from_tensor_slices({
"labels": [True, False, True, False],
"predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch(
3, drop_remainder=True)
def _regression_dataset_fn():
return dataset_ops.Dataset.from_tensor_slices({
"labels": [1., .5, 1., 0.],
"predictions": [1., .75, .25, 0.]}).repeat()
def all_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=["graph"])
def tpu_combinations():
return combinations.combine(
distribution=[
strategy_combinations.tpu_strategy_one_step,
],
mode=["graph"])
class KerasMetricsTest(test.TestCase, parameterized.TestCase):
def _test_metric(self, distribution, dataset_fn, metric_init_fn, expected_fn):
with ops.Graph().as_default(), distribution.scope():
metric = metric_init_fn()
iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())
updates = distribution.experimental_local_results(
distribution.experimental_run_v2(
metric, args=(iterator.get_next(),)))
batches_per_update = distribution.num_replicas_in_sync
self.evaluate(iterator.initialize())
self.evaluate([v.initializer for v in metric.variables])
batches_consumed = 0
for i in range(4):
batches_consumed += batches_per_update
self.evaluate(updates)
self.assertAllClose(expected_fn(batches_consumed),
self.evaluate(metric.result()),
0.001,
msg="After update #" + str(i+1))
if batches_consumed >= 4: # Consume 4 input batches in total.
break
@combinations.generate(all_combinations() + tpu_combinations())
def testMean(self, distribution):
def _dataset_fn():
return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(
4, drop_remainder=True)
def _expected_fn(num_batches):
# Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
return num_batches * 2 - 0.5
self._test_metric(distribution, _dataset_fn, metrics.Mean, _expected_fn)
if __name__ == "__main__":
test.main()
1c4ae526ae0a32d9da7ee7d70f206beb11de142a | 8,242 | py | Python | lib/fast_rcnn/test_gallery.py | yellowstarhx/person_search | ["BSD-2-Clause"]
import numpy as np
import cv2
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
from fast_rcnn.test_utils import get_image_blob, get_gt_boxes_blob
from fast_rcnn.test_probe import _im_exfeat
from utils.timer import Timer
def _im_detect(net, im, roidb, blob_names=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
roidb (an roidb item): to provide gt_boxes if necessary
blob_names (list of str): list of feature blob names to be extracted
Returns:
boxes (ndarray): R x (4*K) array of predicted bounding boxes
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
features (dict of ndarray): {blob name: R x D array of features}
"""
im_blob, im_scales = get_image_blob(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
blobs = {
'data': im_blob,
'im_info': np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32),
}
if 'gt_boxes' in net.blobs:
# Supply gt_boxes as input. Used to get pid_labels for proposals.
blobs['gt_boxes'] = get_gt_boxes_blob(
roidb['boxes'], roidb['gt_classes'], roidb['gt_pids'], im_scales)
# reshape network inputs
for k, v in blobs.iteritems():
net.blobs[k].reshape(*(v.shape))
# do forward
forward_kwargs = {k: v.astype(np.float32, copy=False)
for k, v in blobs.iteritems()}
blobs_out = net.forward(**forward_kwargs)
# unscale rois back to raw image space
rois = net.blobs['rois'].data.copy()
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# the first column of the pid_prob is the non-person box score
scores = blobs_out['pid_prob'][:, 0]
scores = scores[:, np.newaxis]
scores = np.hstack([scores, 1. - scores])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
# As we no longer scale and shift the bbox_pred weights when snapshot,
# we need to manually do this during test.
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS and \
cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
num_classes = box_deltas.shape[1] // 4
stds = np.tile(cfg.TRAIN.BBOX_NORMALIZE_STDS, num_classes)
means = np.tile(cfg.TRAIN.BBOX_NORMALIZE_MEANS, num_classes)
box_deltas = box_deltas * stds + means
boxes = bbox_transform_inv(boxes, box_deltas)
boxes = clip_boxes(boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
boxes = np.tile(boxes, (1, scores.shape[1]))
features = {blob: net.blobs[blob].data.copy() for blob in blob_names} \
if blob_names is not None else {}
return boxes, scores, features
def _vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def detect_and_exfeat(net, imdb,
start=None, end=None,
blob_names=None,
thresh=0.05, vis=False):
assert imdb.num_classes == 2, "Only support two-class detection"
assert cfg.TEST.HAS_RPN, "Only support RPN as proposal"
start = start or 0
end = end or imdb.num_images
num_images = end - start
# all detections are collected into:
# all_boxes[image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
# all_features[blob][image] = N x D array of features
all_boxes = [0 for _ in xrange(num_images)]
all_features = {} if blob_names is None else \
{blob: [0 for _ in xrange(num_images)]
for blob in blob_names}
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(start + i))
roidb = imdb.roidb[start + i]
_t['im_detect'].tic()
boxes, scores, feat_dic = _im_detect(net, im, roidb, blob_names)
_t['im_detect'].toc()
_t['misc'].tic()
j = 1 # only consider j = 1 (foreground class)
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
all_boxes[i] = cls_dets[keep]
for blob, feat in feat_dic.iteritems():
all_features[blob][i] = feat[inds][keep]
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s'.format(i + 1, num_images,
_t['im_detect'].average_time, _t['misc'].average_time)
if vis:
_vis_detections(im, imdb.classes[j], all_boxes[i])
return all_boxes, all_features
def usegt_and_exfeat(net, imdb,
start=None, end=None, blob_names=None):
start = start or 0
end = end or imdb.num_images
num_images = end - start
# all detections are collected into:
# all_boxes[image] = N x 5 array of detections (gt) in
# (x1, y1, x2, y2, score)
# all_features[blob][image] = N x D array of features
all_boxes = [0 for _ in xrange(num_images)]
all_features = {} if blob_names is None else \
{blob: [0 for _ in xrange(num_images)]
for blob in blob_names}
# timers
_t = {'gt_exfeat' : Timer(), 'misc' : Timer()}
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(start + i))
gt = imdb.roidb[start + i]['boxes']
_t['gt_exfeat'].tic()
feat_dic = _im_exfeat(net, im, gt, blob_names)
_t['gt_exfeat'].toc()
all_boxes[i] = np.hstack((gt, np.ones((gt.shape[0], 1)))) \
.astype(np.float32)
for blob, feat in feat_dic.iteritems():
all_features[blob][i] = feat
print 'gt_exfeat: {:d}/{:d} {:.3f}s'.format(i + 1, num_images,
_t['gt_exfeat'].average_time)
return all_boxes, all_features
def demo_detect(net, filename, blob_name='feat', threshold=0.5):
"""Detect persons in a gallery image and extract their features
Arguments:
net (caffe.Net): trained network
filename (str): path to a gallery image file (jpg or png)
blob_name (str): feature blob name. Default 'feat'
threshold (float): detection score threshold. Default 0.5
Returns:
boxes (ndarray): N x 5 detected boxes in format [x1, y1, x2, y2, score]
features (ndarray): N x D features matrix
"""
im = cv2.imread(filename)
boxes, scores, feat_dic = _im_detect(net, im, None, [blob_name])
j = 1 # only consider j = 1 (foreground class)
inds = np.where(scores[:, j] > threshold)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
boxes = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(boxes, cfg.TEST.NMS)
boxes = boxes[keep]
features = feat_dic[blob_name][inds][keep]
if boxes.shape[0] == 0:
return None, None
features = features.reshape(features.shape[0], -1)
return boxes, features
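# --- Added usage sketch (not part of the original module) ---
# How demo_detect might be called, assuming a trained network has been loaded
# with caffe elsewhere; the prototxt/caffemodel/image paths are placeholders.
#
#   import caffe
#   net = caffe.Net('gallery.prototxt', 'gallery.caffemodel', caffe.TEST)
#   boxes, feats = demo_detect(net, 'gallery.jpg', blob_name='feat', threshold=0.5)
#   if boxes is None:
#       print 'no person detected'
#   else:
#       print '%d persons detected, feature dim %d' % (boxes.shape[0], feats.shape[1])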
1c4ae56e29fb675a37300f165e75394c050cee69 | 108 | py | Python | openstack_manager/conf/constant.py | syunkitada/openstack-manager | ["MIT"]
# coding: utf-8
INIFILE = '/etc/openstack_manager/openstack_manager.conf'
LOG_DOMEIN = 'openstack_manager'
1c4ae5b4e8da20e6eed7f5cfb053c8127923859c | 1,815 | py | Python | experiments/joystick.py | joc-mer/py-tetris-esp32 | ["BSD-3-Clause"]
from machine import ADC, Pin, SPI
import utime, math
import max7219 # https://github.com/mcauser/micropython-max7219
# ESP32 max7219 8x8 LED Matrix
# 5V VCC
# GND GND
# D2 DIN
# D5 CS
# D4 CLK
# -- set up 8 pix matrices of 8x8 LED (monochrome)
spi = SPI(1, baudrate=10000000, polarity=1, phase=0, sck=Pin(4), mosi=Pin(2))
ss = Pin(5, Pin.OUT)
display = max7219.Matrix8x8(spi, ss, 4)
display.fill(0)
display.show()
# -- power the joystick with +3.3V
xPin, yPin, cPin = 34, 32, 27
# -- ADC: see https://docs.micropython.org/en/latest/esp32/quickref.html#adc-analog-to-digital-conversion
width = 10
_width = {9:ADC.WIDTH_9BIT, 10:ADC.WIDTH_10BIT,
11:ADC.WIDTH_11BIT, 12:ADC.WIDTH_12BIT,}
X = ADC(Pin(xPin, Pin.IN))
X.atten(ADC.ATTN_11DB) # set 11dB input attenuation (voltage range roughly 0.0v - 3.6v)
X.width(_width[width])
Y = ADC(Pin(yPin, Pin.IN))
Y.atten(ADC.ATTN_11DB) # set 11dB input attenuation (voltage range roughly 0.0v - 3.6v)
Y.width(_width[width])
cButton = Pin(cPin, Pin.IN, Pin.PULL_UP)
# -- initialize, assuming central position at startup
x0, y0 = X.read(), Y.read()
def getXY():
    """
    return x and y, between ~ -1 and +1 (+- ~5%)
    """
    global X, Y, x0, y0, width
    return (X.read()-x0)/2**(width-1), (Y.read()-y0)/2**(width-1)
x, y = 2.0, 2.0
i = 0
while True:
dx, dy = getXY()
# -- round to closest 0.1
dx, dy = int(10*dx)/10, int(10*dy)/10
#print('x=%5.2f y=%5.2f'%(x, y) )
if dx>0:
x += math.sqrt(abs(dx))/5
else:
x -= math.sqrt(abs(dx))/5
if dy>0:
y += math.sqrt(abs(dy))/5
else:
y -= math.sqrt(abs(dy))/5
x, y = x%32, y%8
if cButton.value():
display.fill(0)
display.pixel(int(x), int(y), 1)
i+=1
display.show()
#utime.sleep(0.1)
1c4ae77477d8d3e5166dcacf0aee93a91c200ce5 | 31,826 | py | Python | src/contractExtractor/reentrancyExtractor/judgePath.py | xf97/HuangGai | ["MIT"]
#!/usr/bin/python
#-*- coding: utf-8 -*-
'''
This part of the program determines whether the target contract contains the target path.
If it does, the target path needs to be saved.
'''
'''
Available tooling: slither really is a treasure trove.
Slither features that may be useful:
	the call graph of every function in a contract
	the inheritance relations of the contracts in a file
	the constructor execution result of the most-derived contract
	function-summary: per-function summary of reads/writes and internal/external calls
	human-summary: an overview of each contract's features -> could be used for judging -> actually cannot, because for "Receive ETH" it only checks the payable keyword, not whether the contract can really receive ether
	require: lists the require and assert used in every function of every contract
	the storage layout of the most-derived contract's state variables
	writes to state variables and the corresponding auth operations
'''
import subprocess
import os
from inherGraph import inherGraph	#this module returns the name of the main contract
from colorPrint import *	#this header defines the colored-output constants
from pydot import io	#used to read .dot files
import re
import json
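#--- Added note (not part of the original file) ---
#This module shells out to two slither printers (see getAllFuncCFG / getAllFuncCallGraph below).
#The exact output file names depend on the slither version; the code assumes per-contract
#call-graph files named DOT_PREFIX + <ContractName> + DOT_SUFFIX, e.g.
#  slither temp.sol --print cfg
#  slither temp.sol --print call-graph   ->  temp.sol.<MainContract>.call-graph.dot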
#cache path
#during extraction the contracts are still stored in the cache folder
CACHE_PATH = "./cache/"
#terminal output log file
TERMINAL_FILE = "log.txt"
#path for storing the information needed for injection
INJECT_INFO_PATH = "./result/"
#tuple flag
TUPLE_FLAG = "tuple()"
#type flag of the require/assert functions
REQUIRE_FUNC_TYPE_FLAG = "function (bool) pure"
#definition of the other form of require
REQUIRE_FUNC_STRING_TYPE_FLAG = "function (bool,string memory) pure"
#require flag
REQUIRE_FLAG = "require"
#assert flag
ASSERT_FLAG = "assert"
#flag meaning "replace with the value true"
BOOL_TRUE_FLAG = 0
#graph file prefix
DOT_PREFIX = "temp.sol."
#graph file suffix
DOT_SUFFIX = ".call-graph.dot"
#directed edge flag
EDGE_FLAG = " -> "
#payable function flag
PAYABLE_FLAG = "payable"
#constructor flag
CONSTRUCTOR_FLAG = "constructor"
#fallback function flag
FALLBACK_FLAG = "fallback"
#ledger type flag
MAPPING_FLAG = "mapping(address => uint256)"
#cluster flag in dot files
CLUSTER_FLAG = "cluster_"
#label flag in dot files
LABEL_FLAG = "[label="
#uint256 flag
UINT256_FLAG = "uint256"
#plus-equals flag
ADD_EQU_FLAG = "+="
#equals flag
EQU_FLAG = "="
#plus flag
ADD_FLAG = "+"
#minus-equals flag
SUB_EQU_FLAG = "-="
#minus flag
SUB_FLAG = "-"
#SafeMath flag
SAFEMATH_FLAG = "SAFEMATH"
#library type flag
LIBRARY_FLAG = "library"
#add function name flag
ADD_STR_FLAG = "add"
#sub function name flag
SUB_STR_FLAG = "sub"
#transfer flag
TRANSFER_FLAG = "transfer"
#send flag
SEND_FLAG = "send"
#payable address flag
ADDRESS_PAYABLE_FLAG = "address payable"
#value flag
VALUE_FLAG = "value"
#call flag
CALL_FLAG = "call"
#location where path information is saved
PATH_INFO_PATH = "./pathInfo/"
#unused
#flag string for sending ether
SEND_ETH_FLAG = "Send ETH"
#flag string for receiving ether
RECEIVE_ETH_FLAG = "Receive ETH"
#structure describing a path that sends ether out
class outEtherInfo:
	def __init__(self):
		self.ledgerList = list()	#positions of the ledger-deduction statements on this path
		self.ledgerId = list()
		self.ledgerIndex = -1 #index of the ledger
		self.statementList = list()	#list of statement positions
		self.statementIndex = -1
class judgePath:
def __init__(self, _contractPath, _json, _filename):
		self.filename = _filename	#name of the contract file being processed
self.contractPath = _contractPath
self.inherGraph = inherGraph(_json)
self.targetContractName = self.getMainContract()
self.json = _json
self.receiveEthPath = list()
self.funcCallGraph = list()
self.sendEthPath = list()
		if not os.path.exists(PATH_INFO_PATH):
			os.mkdir(PATH_INFO_PATH)	#create the folder for path information if it does not exist
		'''
		try:
			#if log.txt already exists, delete it
			if os.path.exists(os.path.join(CACHE_PATH, TERMINAL_FILE)):
				os.remove(os.path.join(CACHE_PATH, TERMINAL_FILE))
			#start a script session to record the terminal output
			#compileResult = subprocess.run("script -f " + TERMINAL_FILE, check = True, shell = True)
			print(compileResult.read())
		except:
			print("Failed to record terminal output.")
		'''
def getMainContract(self):
return self.inherGraph.getMainContractName()
	#to be revised
	#already implemented
def storePathInfo(self, _statementInfo):
try:
infoDict = dict()
PATH = "pathInfo"
offset = 1
key = PATH + str(offset)
for _statement in _statementInfo:
tempDict = dict()
tempDict["path"] = _statement[0]
tempDict["ledgerList"] = _statement[1].ledgerList
tempDict["ledgerIndex"] = _statement[1].ledgerIndex
tempDict["statementList"] = _statement[1].statementList
tempDict["statementIndex"] = _statement[1].statementIndex
tempDict["ledgerIdList"] = _statement[1].ledgerId
infoDict[key] = tempDict
offset += 1
				key = PATH + str(offset)	#update the key
			#save the path information
with open(os.path.join(PATH_INFO_PATH, self.filename.split(".")[0] + ".json"), "w", encoding = "utf-8") as f:
json.dump(infoDict, f, indent = 1)
#print("%s %s %s" % (info, self.filename + "target path information...saved", end))
except:
#print("%s %s %s" % (bad, self.filename + " target path information...failed", end))
pass
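	#--- Added illustration (not part of the original file) ---
	#Shape of one record written by storePathInfo above (the values are hypothetical):
	#  {"pathInfo1": {"path": ["C.deposit", "C.withdraw"],
	#                 "ledgerList": [[120, 160]], "ledgerIndex": 1,
	#                 "statementList": [[200, 240]], "statementIndex": 1,
	#                 "ledgerIdList": ["57"]}}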
	#return the condition part of assert statements
def getAssertStatement(self, _ast):
funcCall = self.findASTNode(_ast, "name", "FunctionCall")
		srcList = list()	#source positions of the BinaryOperation inside assert statements
for call in funcCall:
if call["attributes"]["type"] == TUPLE_FLAG:
children0 = call["children"][0] #children[0]是运算符
children1 = call["children"][1] #children[1]是第一个参数-也只有一个
if children0["attributes"]["type"] == REQUIRE_FUNC_TYPE_FLAG and \
children0["attributes"]["value"] == ASSERT_FLAG:
sPos, ePos = self.srcToPos(children1["src"])
srcList.append([sPos, ePos, BOOL_TRUE_FLAG])
else:
continue
else:
continue
#print(srcList, "****")
return srcList
	#return the condition part of require statements
def getRequireStatement(self, _ast):
funcCall = self.findASTNode(_ast, "name", "FunctionCall")
srcList = list()
for call in funcCall:
if call["attributes"]["type"] == TUPLE_FLAG:
children0 = call["children"][0]
children1 = call["children"][1]
if (children0["attributes"]["type"] == REQUIRE_FUNC_TYPE_FLAG or \
children0["attributes"]["type"] == REQUIRE_FUNC_STRING_TYPE_FLAG) and \
children0["attributes"]["value"] == REQUIRE_FLAG:
sPos, ePos = self.srcToPos(children1["src"])
srcList.append([sPos, ePos, BOOL_TRUE_FLAG])
else:
continue
else:
continue
return srcList
	#return the condition part of if statements
	def getIfStatement(self, _ast):
		ifStatements = self.findASTNode(_ast, "name", "IfStatement")
		srcList = list()	#target statements
		#extract the condition part
for ifStatement in ifStatements:
if ifStatement["children"][0]["attributes"]["type"] == "bool" and ifStatement["children"][0]["name"] == "BinaryOperation":
				#found one
sPos, ePos = self.srcToPos(ifStatement["children"][0]["src"])
srcList.append([sPos, ePos, EVER_TRUE_FLAG])
else:
continue
return srcList #2021/12/14 code here
def shieldTerminate(self, _statementInfo):
funcList = list()
contractAndFuncList = list()
for path in [i[0] for i in _statementInfo]:
for func in path:
contractAndFuncList.append(func)
#print(contractAndFuncList)
		#build funcList from the contract and function names
for func in contractAndFuncList:
(contract, function) = tuple(func.split("."))
contractAst = self.getContractAst(contract)
for func in self.findASTNode(contractAst, "name", "FunctionDefinition"):
if func["attributes"]["name"] == function:
					#found a target function
funcList.append(func)
else:
continue
		#look for statements in these functions that could affect the transfer
		srcList = list()  #this list records the source positions that must be masked or replaced
for funcAst in funcList:
'''
srcList.extend(self.getRequireStatement(funcAst))
srcList.extend(self.getAssertStatement(funcAst))
'''
			srcList.extend(self.getIfStatement(funcAst))	#funcAst is the complete AST of the target function, so it can be passed in
		#look for the relevant statements inside the functions
		#then also add the function modifiers
		#note: modifiers deserve attention
		#search the functions one by one and add the modifiers they use to the target function list
modifierList = list()
for func in funcList:
			#at this point func is in AST form
usedModifierIdList = [item["children"][0]["attributes"]["referencedDeclaration"] for item in self.findASTNode(func, "name", "ModifierInvocation")]
if not usedModifierIdList:
continue
else:
				#locate the modifier by its id
for _id in usedModifierIdList:
modifierList.append(self.findASTNode(self.json, "id", _id)[0])
#print(modifierList)
		#3. also inspect the function modifiers
for funcAst in modifierList:
#srcList.extend(self.getRequireStatement(funcAst))
#srcList.extend(self.getAssertStatement(funcAst))
srcList.extend(self.getIfStatement(funcAst))
		#finally, also add identity checks that are not require/assert
		#this caused misjudgements, so that statement is not used
		#deduplicate
		srcList = self.removeDuplicate(srcList)
		#store the information
		#write it out whether or not anything was found
		#if srcList:
self.storeInjectInfo(srcList)
def storeInjectInfo(self, _srcList):
try:
resultDict = dict()
resultDict["srcList"] = _srcList
			#save the information
with open(os.path.join(INJECT_INFO_PATH, self.filename.split(".")[0] + ".json"), "w", encoding = "utf-8") as f:
json.dump(resultDict, f, indent = 1)
#print("%s %s %s" % (info, self.filename + " target injected information...saved", end))
except:
#print("%s %s %s" % (bad, self.filename + " target injected information...failed", end))
pass
#raise Exception()
def removeDuplicate(self, _list):
result = list()
for item in _list:
if item not in result:
result.append(item)
else:
continue
return result
	def run(self):
		#step 1: generate the CFG of every function in the contract
		self.getAllFuncCFG()
		#step 2: generate the call graph between functions (possibly across contracts)
		self.getAllFuncCallGraph()
		#step 3: based on the contract CFGs and the call graph, try to assemble all paths
		#3.1 build the function call graph
		self.getCallGraphDot()
		#3.2 find call paths starting from payable functions and the mapping variables that get increased on them
		increaseLedger = self.findLedger(self.funcCallGraph)
		#3.3 find paths that decrease such an increased mapping variable and also contain a .transfer/.send/.call.value statement
		#ideally record the relative position of the decrement and the transfer statement (e.g. as offsets in the call chain); the result is recorded with the ether-out statements
		statementInfo = self.outOfEther(self.funcCallGraph, increaseLedger)
		for _statement in statementInfo:
			if len(_statement[1].ledgerId) > 1:
				#with more than one ledger we cannot tell which one is the real ledger, so judge the contract as not extractable
				return False
		#clean up the generated cache files
		self.deleteDot()
		if len(statementInfo) == 0:
			#print("%s %s %s" % (info, "Doesn't meet the extraction criteria.", end))
			return False
		else:
			#if the extraction criteria are met, save the path information
			self.storePathInfo(statementInfo)
			#print(statementInfo)
			#record all statements on the paths that may terminate execution
			self.shieldTerminate(statementInfo)
			#print("%s %s %s" % (info, "Meet the extraction criteria.", end))
return True
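	#--- Added usage sketch (not part of the original file) ---
	#How this class is typically driven, assuming the caller already has the contract
	#source path, its compiled AST (as a dict) and the file name; names are placeholders.
	#  jp = judgePath("./cache/temp.sol", astJson, "temp.sol")
	#  if jp.run():
	#      pass  #pathInfo/ and result/ now hold the path and injection information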
	'''
	Not usable: slither's contract-summary is not accurate.
	#1. use Slither to generate the contract-summary; slither generates a summary for every most-derived contract
	compileResult = subprocess.run("slither " + _contractPath + " --print human-summary", check = True, shell = True)
	#2. read log.txt and check whether the main contract can receive and send ether; if so, return True
	return self.findTargetFeatures(self.contractName)
	'''
	#to be implemented
	#already implemented
def outOfEther(self, _callGraph, _ledger):
ledgerId = [int(name.split(".")[1]) for name in _ledger] #获取账本的id
newCallGraph = self.contractNameToNum(_callGraph)
decreaseLedger = list()
pathList = list()
for path in newCallGraph:
			#check each path
(ledger, idList, ledgerIndex) = self.findOnePathDecreseLedger(path, ledgerId)
(outEtherState, etherIndex) = self.findEtherOutStatement(path)
if ledgerIndex != -1 and etherIndex != -1:
				#this path contains both a ledger deduction and an ether-out statement
item = outEtherInfo()
item.ledgerList = ledger
item.ledgerId = idList
				item.ledgerIndex = ledgerIndex	#index of the ledger
				item.statementList = outEtherState	#list of statement positions
item.statementIndex = etherIndex
pathList.append([path, item])
newResult = list()
for i in pathList:
if i not in newResult:
newResult.append(i)
return newResult
def getContractAst(self, _name):
contractList = self.findASTNode(self.json, "name", "ContractDefinition")
for contract in contractList:
if contract["attributes"]["name"] == _name:
return contract
else:
continue
return contractList[0]
def findEtherOutStatement(self, _path):
		'''
		Question: when a path contains several ether-out statements, which one should be recorded?
		The first one, because executing it requires the fewest state changes and it is therefore the least dangerous.
		'''
statementList = list()
index = -1
contractList = self.findASTNode(self.json, "name", "ContractDefinition")
for func in _path:
			#split out the function name and the contract name
funcName = func.split(".")[1]
contractName = func.split(".")[0]
for contract in contractList:
if contract["attributes"]["name"] == contractName:
functionList = self.findASTNode(contract, "name", "FunctionDefinition")
for oneFunc in functionList:
temp = statementList[:]
if oneFunc["attributes"]["kind"] == CONSTRUCTOR_FLAG and funcName == CONSTRUCTOR_FLAG:
accessStatement = self.findASTNode(oneFunc, "name", "MemberAccess")
statementList.extend(self.getStatement_transfer(accessStatement))
statementList.extend(self.getStatement_send(accessStatement))
statementList.extend(self.getStatement_callValue(accessStatement))
elif oneFunc["attributes"]["kind"] == FALLBACK_FLAG and funcName == FALLBACK_FLAG:
accessStatement = self.findASTNode(oneFunc, "name", "MemberAccess")
statementList.extend(self.getStatement_transfer(accessStatement))
statementList.extend(self.getStatement_send(accessStatement))
statementList.extend(self.getStatement_callValue(accessStatement))
elif oneFunc["attributes"]["name"] == funcName:
accessStatement = self.findASTNode(oneFunc, "name", "MemberAccess")
statementList.extend(self.getStatement_transfer(accessStatement))
statementList.extend(self.getStatement_send(accessStatement))
statementList.extend(self.getStatement_callValue(accessStatement))
if len(statementList) > len(temp) and index == -1:
index = _path.index(func)
return statementList, index
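	#--- Added illustration (not part of the original file) ---
	#Solidity patterns the three helpers below look for (examples only):
	#  msg.sender.transfer(amount);
	#  msg.sender.send(amount);
	#  msg.sender.call.value(amount)("");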
	'''
	In the end we decided not to use the type as the criterion, because these functions have different types in different Solidity versions (which would narrow our applicability).
	'''
def getStatement_transfer(self, _astList):
result = list()
for _ast in _astList:
try:
if _ast["attributes"]["member_name"] == TRANSFER_FLAG and _ast["attributes"]["referencedDeclaration"] == None:
if _ast["children"][0]["attributes"]["type"] == ADDRESS_PAYABLE_FLAG:
						#found a .transfer usage among the MemberAccess nodes
startPos, endPos = self.srcToPos(_ast["src"])
result.append([startPos, endPos])
else:
continue
else:
continue
except:
continue
return result
def getStatement_send(self, _astList):
result = list()
for _ast in _astList:
try:
if _ast["attributes"]["member_name"] == SEND_FLAG and _ast["attributes"]["referencedDeclaration"] == None:
if _ast["children"][0]["attributes"]["type"] == ADDRESS_PAYABLE_FLAG:
						#found a .send usage among the MemberAccess nodes
startPos, endPos = self.srcToPos(_ast["src"])
result.append([startPos, endPos])
else:
continue
else:
continue
except:
continue
return result
def getStatement_callValue(self, _astList):
result = list()
for _ast in _astList:
try:
if _ast["attributes"]["member_name"] == VALUE_FLAG and _ast["attributes"]["referencedDeclaration"] == None:
member = _ast["children"][0]
if member["attributes"]["member_name"] == CALL_FLAG and member["attributes"]["referencedDeclaration"] == None:
addressMember = member["children"][0]
if addressMember["attributes"]["type"] == ADDRESS_PAYABLE_FLAG:
							#found a .call.value usage among the MemberAccess nodes
startPos, endPos = self.srcToPos(_ast["src"])
result.append([startPos, endPos])
else:
continue
else:
continue
else:
continue
except:
continue
return result
def findOnePathDecreseLedger(self, _path, _ledgerID):
		'''
		Question: if a path contains several deduction operations, which one should be recorded?
		The last one, because it is closest to the transfer statement.
		'''
result = list()
idList = list()
contractList = self.findASTNode(self.json, "name", "ContractDefinition")
index = -1
for func in _path:
			#split out the function name and the contract name
funcName = func.split(".")[1]
contractName = func.split(".")[0]
			#find the contract's AST
for contract in contractList:
if contract["attributes"]["name"] == contractName:
functionList = self.findASTNode(contract, "name", "FunctionDefinition")
for oneFunc in functionList:
temp = result[:]
if oneFunc["attributes"]["kind"] == CONSTRUCTOR_FLAG and funcName == CONSTRUCTOR_FLAG:
							#found the function's AST
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_subEqu(statementList, _ledgerID)[0])
idList.extend(self.getMapping_subEqu(statementList, _ledgerID)[1])
result.extend(self.getMapping_sub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_sub(statementList, _ledgerID)[1])
result.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[1])
elif oneFunc["attributes"]["kind"] == FALLBACK_FLAG and funcName == FALLBACK_FLAG:
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_subEqu(statementList, _ledgerID)[0])
idList.extend(self.getMapping_subEqu(statementList, _ledgerID)[1])
result.extend(self.getMapping_sub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_sub(statementList, _ledgerID)[1])
result.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[1])
elif oneFunc["attributes"]["name"] == funcName:
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_subEqu(statementList, _ledgerID)[0])
idList.extend(self.getMapping_subEqu(statementList, _ledgerID)[1])
result.extend(self.getMapping_sub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_sub(statementList, _ledgerID)[1])
result.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[0])
idList.extend(self.getMapping_SafeMathSub(statementList, _ledgerID)[1])
if len(result) > len(temp):
index = _path.index(func)
		#finally, remember to deduplicate
result = list(set(result))
idList = list(set(idList))
return result, idList, index
'''
if len(result) == 0:
return result, -1
else:
return result, index
'''
def getMapping_subEqu(self, _astList, _ledgerID):
result = list()
idList = list()
for _ast in _astList:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == SUB_EQU_FLAG:
if _ast["children"][0]["attributes"]["type"] == UINT256_FLAG:
#print("hahahah")
					#look for the id
for _id in _ledgerID:
#_id = ledger.split(".")[1]
if str(_id) == str(_ast["children"][0]["children"][0]["attributes"]["referencedDeclaration"]):
							#in this assignment on a call chain starting from a payable function, a mapping(address => uint256) is decreased with -=
idList.append(str(_id))
result.append(self.srcToPos(_ast["src"]))
else:
continue
else:
continue
else:
continue
return result, idList
def getMapping_sub(self, _astList, _ledgerID):
result = list()
idList = list()
for _ast in _astList:
try:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == EQU_FLAG:
#print(_ast["attributes"])
num = _ast["children"][0]
operator = _ast["children"][1]
if num["attributes"]["type"] == UINT256_FLAG and operator["attributes"]["operator"] == SUB_FLAG:
for _id in _ledgerID:
#_id = ledger.split(".")[1]
if str(_id) == str(num["children"][0]["attributes"]["referencedDeclaration"]):
								#in this assignment on a call chain starting from a payable function, a mapping(address => uint256) is decreased with a - operation
idList.append(str(_id))
result.append(self.srcToPos(_ast["src"]))
except:
continue
return result, idList
def getMapping_SafeMathSub(self, _astList, _ledgerID):
safeMathAst = dict()
for ast in self.findASTNode(self.json, "name", "ContractDefinition"):
if ast["attributes"]["name"].upper() == SAFEMATH_FLAG and ast["attributes"]["contractKind"] == LIBRARY_FLAG:
safeMathAst = ast
				#found the SafeMath AST
break
else:
continue
subId = int()
if len(safeMathAst.keys()) == 0:
return list(), list()
		#use the id to identify the function call
for func in self.findASTNode(safeMathAst, "name", "FunctionDefinition"):
if func["attributes"]["name"].lower() == SUB_STR_FLAG:
subId = func["id"]
break
else:
continue
		#next, look for the calls
result = list()
idList = list()
		#ASTs of the assignment statements
for _ast in _astList:
try:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == EQU_FLAG:
#print(_ast["attributes"])
num = _ast["children"][0]
operator = _ast["children"][1]
if num["attributes"]["type"] == UINT256_FLAG and operator["attributes"]["type"] == UINT256_FLAG:
mapping = num["children"][0]
safeMathAdd = operator["children"][0]
if safeMathAdd["attributes"]["member_name"].lower() == SUB_STR_FLAG and safeMathAdd["attributes"]["referencedDeclaration"] == subId:
						#confirmed: this statement uses the sub function of the SafeMath library; check whether the receiver of the result is the structure we want
for _id in _ledgerID:
#_id = ledger.split(".")[1]
if str(_id) == str(mapping["attributes"]["referencedDeclaration"]):
									#in this assignment on a call chain starting from a payable function, SafeMath.sub is applied to a mapping(address => uint256)
idList.append(str(_id))
result.append(self.srcToPos(_ast["src"]))
except:
continue
return result, idList
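	#--- Added illustration (not part of the original file) ---
	#Ledger-decrement patterns matched by the three getMapping_* helpers above (examples only):
	#  balances[msg.sender] -= amount;                           (getMapping_subEqu)
	#  balances[msg.sender] = balances[msg.sender] - amount;     (getMapping_sub)
	#  balances[msg.sender] = balances[msg.sender].sub(amount);  (getMapping_SafeMathSub)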
	#to be implemented
	#clear the local cache
def deleteDot(self):
for file in os.listdir():
if file.endswith(DOT_SUFFIX):
os.remove(file)
#print("%s %s %s" % (info, "Clear intermediate files.", end))
def getAllFuncCFG(self):
        # The printer's output is written to the local working directory
try:
subprocess.run("slither " + self.contractPath + " --print cfg", check = True, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
except:
#print("Failed to generate control flow graph.")
pass
def getAllFuncCallGraph(self):
        # The printer's output is written to the local working directory
try:
subprocess.run("slither " + self.contractPath + " --print call-graph", check = True, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
except:
#print("Failed to generate functions call-graph.")
pass
def getCallGraphDot(self):
dotFileName = CACHE_PATH + DOT_PREFIX + self.targetContractName + DOT_SUFFIX
try:
f = io.open(dotFileName)
edgeList = list()
            # Walk the dot file line by line and collect every directed edge
for line in f.readlines():
if line.find(EDGE_FLAG) != -1:
                    # Found a directed edge; split it into start and end points
edgeInfo = list()
edgeInfo.append(line.split(EDGE_FLAG)[0])
                    edgeInfo.append(line.split(EDGE_FLAG)[1][:-1])    # strip the trailing newline
                    # Add it to the edge set
edgeList.append(edgeInfo)
            # Stitch paths together from the edge set
            # ("my start point is someone else's end point")
            temp = edgeList[:]    # keep a copy to avoid problems while iterating
for edge in edgeList:
result = edge[:]
                # Two checks: is my end point your start point, and is my start point your end point
startPos = edge[0]
endPos = edge[1]
for line in temp:
if line[1] == startPos:
                        # Its end point is my start point, so prepend it
                        result.insert(0, line[0])
                        # Update the start point
                        startPos = line[0]
                    if line[0] == endPos:
                        # Its start point is my end point, so append it
                        result.append(line[1])
                        # Update the end point
                        endPos = line[1]
                # The cross-contract function-call path is assembled
self.funcCallGraph.append(result)
            # Next, add the "standalone" functions
            f.seek(0,0)    # go back to the beginning of the file
startFuncList = [funcName[0]for funcName in self.funcCallGraph]
for line in f.readlines():
if line.find(LABEL_FLAG) != -1:
funcName = line.split(" ")[0]
if funcName not in startFuncList:
self.funcCallGraph.append([funcName])
else:
continue
except:
#print("Failed to read functions call-graph.")
pass
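    # Worked example of the stitching above (illustrative, not taken from a real dot
    # file): given directed edges A -> B and B -> C, the loop that starts from the
    # edge [A, B] sees that another edge begins at its end point B, so it appends C
    # and records the call chain [A, B, C] in self.funcCallGraph.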
    # To be implemented
    # Already implemented
def findLedger(self, _callGraph):
#find each payable function and its contract
#dict
payableList = self.getPayableFunc(self.json)
#contractName to num
newCallGraph = self.contractNameToNum(_callGraph)
#mapping
mappingList = self.getMapping(self.json)
        # Given the call graph, the list of payable functions and the mappings, find the mappings that get credited (money added) on paths starting from a payable function
increaseMapping = self.findIncreaseMapping(payableList, newCallGraph, mappingList)
return increaseMapping
def findIncreaseMapping(self, _payableList, _funcPath, _mappingList):
result = list()
for payableFunc in _payableList:
for onePath in _funcPath:
if onePath[0] == payableFunc:
                    # Found a matching path
if len(self.findOnePathMapping(onePath, _mappingList)):
                        self.receiveEthPath.append(onePath)    # found a path that receives Ether
result.extend(self.findOnePathMapping(onePath, _mappingList))
else:
continue
result = list(set(result))
return result
def findOnePathMapping(self, _path, _mappingList):
result = list()
contractList = self.findASTNode(self.json, "name", "ContractDefinition")
for func in _path:
            # Split out the function name and the contract name
funcName = func.split(".")[1]
contractName = func.split(".")[0]
            # Find the contract's AST
for contract in contractList:
if contract["attributes"]["name"] == contractName:
functionList = self.findASTNode(contract, "name", "FunctionDefinition")
for oneFunc in functionList:
if oneFunc["attributes"]["kind"] == CONSTRUCTOR_FLAG and funcName == CONSTRUCTOR_FLAG:
                            # Found the function's AST
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_addEqu(statementList, _mappingList))
result.extend(self.getMapping_add(statementList, _mappingList))
result.extend(self.getMapping_SafeMathAdd(statementList, _mappingList))
elif oneFunc["attributes"]["kind"] == FALLBACK_FLAG and funcName == FALLBACK_FLAG:
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_addEqu(statementList, _mappingList))
result.extend(self.getMapping_add(statementList, _mappingList))
result.extend(self.getMapping_SafeMathAdd(statementList, _mappingList))
elif oneFunc["attributes"]["name"] == funcName:
statementList = self.findASTNode(oneFunc, "name", "Assignment")
result.extend(self.getMapping_addEqu(statementList, _mappingList))
result.extend(self.getMapping_add(statementList, _mappingList))
result.extend(self.getMapping_SafeMathAdd(statementList, _mappingList))
        # Finally, remember to deduplicate
result = list(set(result))
return result
    # If an assignment statement applies += to one of the mappings, return those mappings
def getMapping_addEqu(self, _astList, _mappingList):
result = list()
for _ast in _astList:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == ADD_EQU_FLAG:
if _ast["children"][0]["attributes"]["type"] == UINT256_FLAG:
#print("hahahah")
                    # Find the matching ids
for ledger in _mappingList:
_id = ledger.split(".")[1]
if str(_id) == str(_ast["children"][0]["children"][0]["attributes"]["referencedDeclaration"]):
                            # In this assignment, reached from a payable entry function, += is applied to a mapping(address=>uint256)
result.append(ledger)
else:
continue
else:
continue
else:
continue
return result
    # If an assignment statement applies + to one of the mappings, return those mappings
def getMapping_add(self, _astList, _mappingList):
result = list()
for _ast in _astList:
try:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == EQU_FLAG:
#print(_ast["attributes"])
num = _ast["children"][0]
operator = _ast["children"][1]
if num["attributes"]["type"] == UINT256_FLAG and operator["attributes"]["operator"] == ADD_FLAG:
for ledger in _mappingList:
_id = ledger.split(".")[1]
if str(_id) == str(num["children"][0]["attributes"]["referencedDeclaration"]):
                                # In this assignment, reached from a payable entry function, an addition is applied to a mapping(address=>uint256)
result.append(ledger)
except:
continue
return result
    # To be implemented
    # Already implemented
def getMapping_SafeMathAdd(self, _astList, _mappingList):
safeMathAst = dict()
for ast in self.findASTNode(self.json, "name", "ContractDefinition"):
if ast["attributes"]["name"].upper() == SAFEMATH_FLAG and ast["attributes"]["contractKind"] == LIBRARY_FLAG:
safeMathAst = ast
                # Found the SafeMath library's AST
break
else:
continue
addId = int()
if len(safeMathAst.keys()) == 0:
return list()
        # Use the function id to identify calls to it
for func in self.findASTNode(safeMathAst, "name", "FunctionDefinition"):
if func["attributes"]["name"].lower() == ADD_STR_FLAG:
addId = func["id"]
break
else:
continue
        # Next, look for the call sites
result = list()
        # ASTs of the assignment statements
for _ast in _astList:
try:
if _ast["attributes"]["type"] == UINT256_FLAG and _ast["attributes"]["operator"] == EQU_FLAG:
#print(_ast["attributes"])
num = _ast["children"][0]
operator = _ast["children"][1]
if num["attributes"]["type"] == UINT256_FLAG and operator["attributes"]["type"] == UINT256_FLAG:
mapping = num["children"][0]
safeMathAdd = operator["children"][0]
if safeMathAdd["attributes"]["member_name"].lower() == ADD_STR_FLAG and safeMathAdd["attributes"]["referencedDeclaration"] == addId:
                        # Confirmed: this statement calls SafeMath's add; check whether the result is received by the structure we care about
for ledger in _mappingList:
_id = ledger.split(".")[1]
if str(_id) == str(mapping["attributes"]["referencedDeclaration"]):
                                # In this assignment, reached from a payable entry function, SafeMath.add is applied to a mapping(address=>uint256)
result.append(ledger)
except:
continue
return result
def contractNameToNum(self,_callGraph):
dotFileName = CACHE_PATH + DOT_PREFIX + self.targetContractName + DOT_SUFFIX
#try:
result = list()
f = io.open(dotFileName)
contractNameDict = dict()
for line in f.readlines():
if line.find(CLUSTER_FLAG) != -1:
                # Found a cluster declaration; split out the number and the contract name
try:
temp = line.split(" ")[1]
                    # The naive split cannot handle contract names that start with an underscore, so use a manual parse
num, contractName = self.splitTemp(temp)
contractNameDict[contractName] = num
except:
continue
else:
continue
for _list in _callGraph:
aList = list()
for func in _list:
try:
num, funcName = self.splitTempName(func)
for item in contractNameDict.items():
if item[1] == num:
temp = item[0] + "." + funcName
aList.append(temp)
else:
continue
except:
continue
result.append(aList)
#print(contractNameDict)
#print(result)
return result
def splitTemp(self, _str):
result = list()
flag = 0
temp = str()
for char in _str:
if char != "_":
temp += char
elif char == "_" and flag < 1:
temp = str()
flag += 1
elif char == "_" and flag == 1:
result.append(temp)
temp = str()
flag += 1
elif flag >= 2:
temp += char
result.append(temp)
return result[0], result[1]
def splitTempName(self, _str):
result = list()
flag = False
temp = str()
for char in _str:
if char == "_" and flag == False:
flag = True
result.append(temp)
temp = ""
else:
temp += char
result.append(temp)
        return result[0][1:], result[1][:-1]    # strip the double quotes at both ends
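    # Illustrative sketch of the two parsers above (the exact cluster/node naming is
    # an assumption about slither's dot output, not verified here):
    #   splitTemp("cluster_42_MyToken")  -> ("42", "MyToken")
    #   splitTempName('"42_transfer"')   -> ("42", "transfer")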
def getMapping(self, _json):
        # Variable declarations
mappingDict = dict()
for ast in self.findASTNode(_json, "name", "VariableDeclaration"):
#print(ast)
if ast["attributes"]["type"] == MAPPING_FLAG:
mappingName = ast["id"]
startPos, endPos = self.srcToPos(ast["src"])
mappingDict[mappingName] = [startPos, endPos]
contractDict = dict()
        # dict: {contract name: [start position, end position]}
for ast in self.findASTNode(self.json, "name", "ContractDefinition"):
contractName = ast["attributes"]["name"]
startPos, endPos = self.srcToPos(ast["src"])
contractDict[contractName] = [startPos, endPos]
        # Assemble the result according to which contract contains each mapping
result = list()
for mappingName in mappingDict:
startPos, endPos = mappingDict[mappingName]
for item in contractDict.items():
if startPos >= item[1][0] and endPos <= item[1][1]:
                    # Found the contract this mapping belongs to
temp = item[0] + "." + str(mappingName)
result.append(temp)
break
else:
continue
return result
def getPayableFunc(self, _json):
contractDict = dict()
        # dict: {contract name: [start position, end position]}
for ast in self.findASTNode(self.json, "name", "ContractDefinition"):
contractName = ast["attributes"]["name"]
startPos, endPos = self.srcToPos(ast["src"])
contractDict[contractName] = [startPos, endPos]
#payable func
funcList = list()
for ast in self.findASTNode(self.json, "name", "FunctionDefinition"):
if ast["attributes"]["stateMutability"] == PAYABLE_FLAG:
if ast["attributes"]["kind"] == CONSTRUCTOR_FLAG:
functionName = CONSTRUCTOR_FLAG
elif ast["attributes"]["kind"] == FALLBACK_FLAG:
functionName = FALLBACK_FLAG
else:
functionName = ast["attributes"]["name"]
startPos, endPos = self.srcToPos(ast["src"])
                # Bug fix: different contracts may contain functions with the same name
funcList.append([functionName, startPos, endPos])
        # Assemble the result according to which contract contains each function
result = list()
for func in funcList:
startPos = func[1]
endPos = func[2]
for item in contractDict.items():
if startPos >= item[1][0] and endPos <= item[1][1]:
                    # Found the contract this function belongs to
temp = item[0] + "." + func[0]
result.append(temp)
break
else:
continue
        return result    # return the payable functions
    # Return every dict in the given AST that contains the key/value pair "_name": "_value"
def findASTNode(self, _ast, _name, _value):
queue = [_ast]
result = list()
literalList = list()
while len(queue) > 0:
data = queue.pop()
for key in data:
if key == _name and data[key] == _value:
result.append(data)
elif type(data[key]) == dict:
queue.append(data[key])
elif type(data[key]) == list:
for item in data[key]:
if type(item) == dict:
queue.append(item)
return result
    # Input:  657:17:0
    # Output: 657, 674
def srcToPos(self, _src):
temp = _src.split(":")
return int(temp[0]), int(temp[0]) + int(temp[1])
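# Minimal usage sketch (illustrative only; the contract path, the location of the
# compiled AST json and the way it is loaded are assumptions, not part of the
# original code):
if __name__ == "__main__":
    with open(CACHE_PATH + "temp.sol.json") as fp:   # assumed location of solc's AST output
        contract_ast = json.load(fp)
    analyzer = judgePath("./temp.sol", contract_ast, "temp.sol")
    print(analyzer.run())   # True when the contract meets the extraction criteria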
| 32.115035 | 153 | 0.68532 |
| true | true |
1c4ae85a0d8d9e82d0383f1b99a36ec9586665d4 | 8,662 | py | Python | homeassistant/components/xiaomi_miio/remote.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-02-18T03:16:32.000Z | 2019-02-18T03:16:32.000Z | homeassistant/components/xiaomi_miio/remote.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:29:36.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/xiaomi_miio/remote.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-09-28T07:06:08.000Z | 2019-09-28T07:06:08.000Z | """Support for the Xiaomi IR Remote (Chuangmi IR)."""
import asyncio
import logging
import time
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.remote import (
PLATFORM_SCHEMA, DOMAIN, ATTR_NUM_REPEATS, ATTR_DELAY_SECS,
DEFAULT_DELAY_SECS, RemoteDevice)
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_TOKEN, CONF_TIMEOUT,
ATTR_ENTITY_ID, ATTR_HIDDEN, CONF_COMMAND)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util.dt import utcnow
REQUIREMENTS = ['python-miio==0.4.4', 'construct==2.9.45']
_LOGGER = logging.getLogger(__name__)
SERVICE_LEARN = 'xiaomi_miio_learn_command'
DATA_KEY = 'remote.xiaomi_miio'
CONF_SLOT = 'slot'
CONF_COMMANDS = 'commands'
DEFAULT_TIMEOUT = 10
DEFAULT_SLOT = 1
LEARN_COMMAND_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): vol.All(str),
vol.Optional(CONF_TIMEOUT, default=10): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_SLOT, default=1):
vol.All(int, vol.Range(min=1, max=1000000)),
})
COMMAND_SCHEMA = vol.Schema({
vol.Required(CONF_COMMAND): vol.All(cv.ensure_list, [cv.string])
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT):
vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_SLOT, default=DEFAULT_SLOT):
vol.All(int, vol.Range(min=1, max=1000000)),
vol.Optional(ATTR_HIDDEN, default=True): cv.boolean,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_COMMANDS, default={}):
cv.schema_with_slug_keys(COMMAND_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Xiaomi IR Remote (Chuangmi IR) platform."""
from miio import ChuangmiIr, DeviceException
host = config.get(CONF_HOST)
token = config.get(CONF_TOKEN)
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
# The Chuang Mi IR Remote Controller wants to be re-discovered every
# 5 minutes. As long as polling is disabled the device should be
# re-discovered (lazy_discover=False) in front of every command.
device = ChuangmiIr(host, token, lazy_discover=False)
# Check that we can communicate with device.
try:
device_info = device.info()
model = device_info.model
unique_id = "{}-{}".format(model, device_info.mac_address)
_LOGGER.info("%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
raise PlatformNotReady
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
friendly_name = config.get(CONF_NAME, "xiaomi_miio_" +
host.replace('.', '_'))
slot = config.get(CONF_SLOT)
timeout = config.get(CONF_TIMEOUT)
hidden = config.get(ATTR_HIDDEN)
xiaomi_miio_remote = XiaomiMiioRemote(friendly_name, device, unique_id,
slot, timeout, hidden,
config.get(CONF_COMMANDS))
hass.data[DATA_KEY][host] = xiaomi_miio_remote
async_add_entities([xiaomi_miio_remote])
async def async_service_handler(service):
"""Handle a learn command."""
if service.service != SERVICE_LEARN:
_LOGGER.error("We should not handle service: %s", service.service)
return
entity_id = service.data.get(ATTR_ENTITY_ID)
entity = None
for remote in hass.data[DATA_KEY].values():
if remote.entity_id == entity_id:
entity = remote
if not entity:
_LOGGER.error("entity_id: '%s' not found", entity_id)
return
device = entity.device
slot = service.data.get(CONF_SLOT, entity.slot)
await hass.async_add_executor_job(device.learn, slot)
timeout = service.data.get(CONF_TIMEOUT, entity.timeout)
_LOGGER.info("Press the key you want Home Assistant to learn")
start_time = utcnow()
while (utcnow() - start_time) < timedelta(seconds=timeout):
message = await hass.async_add_executor_job(
device.read, slot)
_LOGGER.debug("Message received from device: '%s'", message)
if 'code' in message and message['code']:
log_msg = "Received command is: {}".format(message['code'])
_LOGGER.info(log_msg)
hass.components.persistent_notification.async_create(
log_msg, title='Xiaomi Miio Remote')
return
if ('error' in message and
message['error']['message'] == "learn timeout"):
await hass.async_add_executor_job(device.learn, slot)
await asyncio.sleep(1, loop=hass.loop)
_LOGGER.error("Timeout. No infrared command captured")
hass.components.persistent_notification.async_create(
"Timeout. No infrared command captured",
title='Xiaomi Miio Remote')
hass.services.async_register(DOMAIN, SERVICE_LEARN, async_service_handler,
schema=LEARN_COMMAND_SCHEMA)
class XiaomiMiioRemote(RemoteDevice):
"""Representation of a Xiaomi Miio Remote device."""
def __init__(self, friendly_name, device, unique_id,
slot, timeout, hidden, commands):
"""Initialize the remote."""
self._name = friendly_name
self._device = device
self._unique_id = unique_id
self._is_hidden = hidden
self._slot = slot
self._timeout = timeout
self._state = False
self._commands = commands
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the remote."""
return self._name
@property
def device(self):
"""Return the remote object."""
return self._device
@property
def hidden(self):
"""Return if we should hide entity."""
return self._is_hidden
@property
def slot(self):
"""Return the slot to save learned command."""
return self._slot
@property
def timeout(self):
"""Return the timeout for learning command."""
return self._timeout
@property
def is_on(self):
"""Return False if device is unreachable, else True."""
from miio import DeviceException
try:
self.device.info()
return True
except DeviceException:
return False
@property
def should_poll(self):
"""We should not be polled for device up state."""
return False
@property
def device_state_attributes(self):
"""Hide remote by default."""
if self._is_hidden:
return {'hidden': 'true'}
return
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGER.error("Device does not support turn_on, "
"please use 'remote.send_command' to send commands.")
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGER.error("Device does not support turn_off, "
"please use 'remote.send_command' to send commands.")
def _send_command(self, payload):
"""Send a command."""
from miio import DeviceException
_LOGGER.debug("Sending payload: '%s'", payload)
try:
self.device.play(payload)
except DeviceException as ex:
_LOGGER.error(
"Transmit of IR command failed, %s, exception: %s",
payload, ex)
def send_command(self, command, **kwargs):
"""Send a command."""
num_repeats = kwargs.get(ATTR_NUM_REPEATS)
delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
for _ in range(num_repeats):
for payload in command:
if payload in self._commands:
for local_payload in self._commands[payload][CONF_COMMAND]:
self._send_command(local_payload)
else:
self._send_command(payload)
time.sleep(delay)
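# Example configuration.yaml entry for this platform (illustrative sketch: the host,
# token and command payload are placeholders, not real values; the keys follow
# PLATFORM_SCHEMA above):
#
# remote:
#   - platform: xiaomi_miio
#     name: xiaomi_ir_living_room
#     host: 192.168.1.10
#     token: !secret xiaomi_ir_token
#     slot: 1
#     timeout: 30
#     commands:
#       tv_power:
#         command:
#           - '<IR code captured with the xiaomi_miio_learn_command service>'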
| 33.187739 | 79 | 0.625491 | import asyncio
import logging
import time
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.remote import (
PLATFORM_SCHEMA, DOMAIN, ATTR_NUM_REPEATS, ATTR_DELAY_SECS,
DEFAULT_DELAY_SECS, RemoteDevice)
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_TOKEN, CONF_TIMEOUT,
ATTR_ENTITY_ID, ATTR_HIDDEN, CONF_COMMAND)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util.dt import utcnow
REQUIREMENTS = ['python-miio==0.4.4', 'construct==2.9.45']
_LOGGER = logging.getLogger(__name__)
SERVICE_LEARN = 'xiaomi_miio_learn_command'
DATA_KEY = 'remote.xiaomi_miio'
CONF_SLOT = 'slot'
CONF_COMMANDS = 'commands'
DEFAULT_TIMEOUT = 10
DEFAULT_SLOT = 1
LEARN_COMMAND_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): vol.All(str),
vol.Optional(CONF_TIMEOUT, default=10): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_SLOT, default=1):
vol.All(int, vol.Range(min=1, max=1000000)),
})
COMMAND_SCHEMA = vol.Schema({
vol.Required(CONF_COMMAND): vol.All(cv.ensure_list, [cv.string])
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT):
vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_SLOT, default=DEFAULT_SLOT):
vol.All(int, vol.Range(min=1, max=1000000)),
vol.Optional(ATTR_HIDDEN, default=True): cv.boolean,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_COMMANDS, default={}):
cv.schema_with_slug_keys(COMMAND_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
from miio import ChuangmiIr, DeviceException
host = config.get(CONF_HOST)
token = config.get(CONF_TOKEN)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
device = ChuangmiIr(host, token, lazy_discover=False)
try:
device_info = device.info()
model = device_info.model
unique_id = "{}-{}".format(model, device_info.mac_address)
_LOGGER.info("%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
raise PlatformNotReady
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
friendly_name = config.get(CONF_NAME, "xiaomi_miio_" +
host.replace('.', '_'))
slot = config.get(CONF_SLOT)
timeout = config.get(CONF_TIMEOUT)
hidden = config.get(ATTR_HIDDEN)
xiaomi_miio_remote = XiaomiMiioRemote(friendly_name, device, unique_id,
slot, timeout, hidden,
config.get(CONF_COMMANDS))
hass.data[DATA_KEY][host] = xiaomi_miio_remote
async_add_entities([xiaomi_miio_remote])
async def async_service_handler(service):
if service.service != SERVICE_LEARN:
_LOGGER.error("We should not handle service: %s", service.service)
return
entity_id = service.data.get(ATTR_ENTITY_ID)
entity = None
for remote in hass.data[DATA_KEY].values():
if remote.entity_id == entity_id:
entity = remote
if not entity:
_LOGGER.error("entity_id: '%s' not found", entity_id)
return
device = entity.device
slot = service.data.get(CONF_SLOT, entity.slot)
await hass.async_add_executor_job(device.learn, slot)
timeout = service.data.get(CONF_TIMEOUT, entity.timeout)
_LOGGER.info("Press the key you want Home Assistant to learn")
start_time = utcnow()
while (utcnow() - start_time) < timedelta(seconds=timeout):
message = await hass.async_add_executor_job(
device.read, slot)
_LOGGER.debug("Message received from device: '%s'", message)
if 'code' in message and message['code']:
log_msg = "Received command is: {}".format(message['code'])
_LOGGER.info(log_msg)
hass.components.persistent_notification.async_create(
log_msg, title='Xiaomi Miio Remote')
return
if ('error' in message and
message['error']['message'] == "learn timeout"):
await hass.async_add_executor_job(device.learn, slot)
await asyncio.sleep(1, loop=hass.loop)
_LOGGER.error("Timeout. No infrared command captured")
hass.components.persistent_notification.async_create(
"Timeout. No infrared command captured",
title='Xiaomi Miio Remote')
hass.services.async_register(DOMAIN, SERVICE_LEARN, async_service_handler,
schema=LEARN_COMMAND_SCHEMA)
class XiaomiMiioRemote(RemoteDevice):
def __init__(self, friendly_name, device, unique_id,
slot, timeout, hidden, commands):
self._name = friendly_name
self._device = device
self._unique_id = unique_id
self._is_hidden = hidden
self._slot = slot
self._timeout = timeout
self._state = False
self._commands = commands
@property
def unique_id(self):
return self._unique_id
@property
def name(self):
return self._name
@property
def device(self):
return self._device
@property
def hidden(self):
return self._is_hidden
@property
def slot(self):
return self._slot
@property
def timeout(self):
return self._timeout
@property
def is_on(self):
from miio import DeviceException
try:
self.device.info()
return True
except DeviceException:
return False
@property
def should_poll(self):
return False
@property
def device_state_attributes(self):
if self._is_hidden:
return {'hidden': 'true'}
return
async def async_turn_on(self, **kwargs):
_LOGGER.error("Device does not support turn_on, "
"please use 'remote.send_command' to send commands.")
async def async_turn_off(self, **kwargs):
_LOGGER.error("Device does not support turn_off, "
"please use 'remote.send_command' to send commands.")
def _send_command(self, payload):
from miio import DeviceException
_LOGGER.debug("Sending payload: '%s'", payload)
try:
self.device.play(payload)
except DeviceException as ex:
_LOGGER.error(
"Transmit of IR command failed, %s, exception: %s",
payload, ex)
def send_command(self, command, **kwargs):
num_repeats = kwargs.get(ATTR_NUM_REPEATS)
delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
for _ in range(num_repeats):
for payload in command:
if payload in self._commands:
for local_payload in self._commands[payload][CONF_COMMAND]:
self._send_command(local_payload)
else:
self._send_command(payload)
time.sleep(delay)
| true | true |
1c4ae8c514ee7ef6422e7f7a69e4e366db683d91 | 6,151 | py | Python | conditional/util/member.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 9 | 2016-08-21T19:27:24.000Z | 2019-09-12T06:56:49.000Z | conditional/util/member.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 237 | 2016-08-21T18:08:58.000Z | 2022-03-28T17:01:36.000Z | conditional/util/member.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 31 | 2016-08-22T23:46:48.000Z | 2022-03-26T22:37:39.000Z | from datetime import datetime
from conditional import start_of_year
from conditional.models.models import CommitteeMeeting
from conditional.models.models import CurrentCoops
from conditional.models.models import FreshmanEvalData
from conditional.models.models import HouseMeeting
from conditional.models.models import MemberCommitteeAttendance
from conditional.models.models import MemberHouseMeetingAttendance
from conditional.models.models import MemberSeminarAttendance
from conditional.models.models import TechnicalSeminar
from conditional.util.cache import service_cache
from conditional.util.ldap import ldap_get_active_members
from conditional.util.ldap import ldap_get_current_students
from conditional.util.ldap import ldap_get_intro_members
from conditional.util.ldap import ldap_get_onfloor_members
from conditional.util.ldap import ldap_get_roomnumber
from conditional.util.ldap import ldap_is_active
from conditional.util.ldap import ldap_is_onfloor
@service_cache(maxsize=1024)
def get_voting_members():
if datetime.today() < datetime(start_of_year().year, 12, 31):
semester = 'Fall'
else:
semester = 'Spring'
active_members = set(member.uid for member in ldap_get_active_members())
intro_members = set(member.uid for member in ldap_get_intro_members())
on_coop = set(member.uid for member in CurrentCoops.query.filter(
CurrentCoops.date_created > start_of_year(),
CurrentCoops.semester == semester).all())
voting_list = list(active_members - intro_members - on_coop)
passed_fall = FreshmanEvalData.query.filter(
FreshmanEvalData.freshman_eval_result == "Passed",
FreshmanEvalData.eval_date > start_of_year()
).distinct()
for intro_member in passed_fall:
if intro_member.uid not in voting_list:
voting_list.append(intro_member.uid)
return voting_list
@service_cache(maxsize=1024)
def get_members_info():
members = ldap_get_current_students()
member_list = []
for account in members:
uid = account.uid
name = account.cn
active = ldap_is_active(account)
onfloor = ldap_is_onfloor(account)
room = ldap_get_roomnumber(account)
hp = account.housingPoints
member_list.append({
"uid": uid,
"name": name,
"active": active,
"onfloor": onfloor,
"room": room,
"hp": hp
})
return member_list
def get_freshman_data(user_name):
freshman = {}
freshman_data = FreshmanEvalData.query.filter(FreshmanEvalData.uid == user_name).first()
if freshman_data is None:
return None
freshman['status'] = freshman_data.freshman_eval_result
# number of committee meetings attended
c_meetings = [m.meeting_id for m in
MemberCommitteeAttendance.query.filter(
MemberCommitteeAttendance.uid == user_name
) if CommitteeMeeting.query.filter(
CommitteeMeeting.id == m.meeting_id).first().approved]
freshman['committee_meetings'] = len(c_meetings)
# technical seminar total
t_seminars = [s.seminar_id for s in
MemberSeminarAttendance.query.filter(
MemberSeminarAttendance.uid == user_name
) if TechnicalSeminar.query.filter(
TechnicalSeminar.id == s.seminar_id).first().approved]
freshman['ts_total'] = len(t_seminars)
attendance = [m.name for m in TechnicalSeminar.query.filter(
TechnicalSeminar.id.in_(t_seminars)
)]
freshman['ts_list'] = attendance
h_meetings = [(m.meeting_id, m.attendance_status) for m in
MemberHouseMeetingAttendance.query.filter(
MemberHouseMeetingAttendance.uid == user_name)]
freshman['hm_missed'] = len([h for h in h_meetings if h[1] == "Absent"])
freshman['social_events'] = freshman_data.social_events
freshman['general_comments'] = freshman_data.other_notes
freshman['sig_missed'] = freshman_data.signatures_missed
freshman['eval_date'] = freshman_data.eval_date
return freshman
@service_cache(maxsize=1024)
def get_onfloor_members():
return [uid for uid in [members.uid for members in ldap_get_active_members()]
if uid in [members.uid for members in ldap_get_onfloor_members()]]
def get_cm(member):
c_meetings = [{
"uid": cm.uid,
"timestamp": cm.timestamp,
"committee": cm.committee
} for cm in CommitteeMeeting.query.join(
MemberCommitteeAttendance,
MemberCommitteeAttendance.meeting_id == CommitteeMeeting.id
).with_entities(
MemberCommitteeAttendance.uid,
CommitteeMeeting.timestamp,
CommitteeMeeting.committee
).filter(
CommitteeMeeting.timestamp > start_of_year(),
MemberCommitteeAttendance.uid == member.uid,
CommitteeMeeting.approved == True # pylint: disable=singleton-comparison
).all()]
return c_meetings
def get_hm(member, only_absent=False):
h_meetings = MemberHouseMeetingAttendance.query.outerjoin(
HouseMeeting,
MemberHouseMeetingAttendance.meeting_id == HouseMeeting.id).with_entities(
MemberHouseMeetingAttendance.meeting_id,
MemberHouseMeetingAttendance.attendance_status,
HouseMeeting.date).filter(
HouseMeeting.date > start_of_year(),
MemberHouseMeetingAttendance.uid == member.uid)
if only_absent:
h_meetings = h_meetings.filter(MemberHouseMeetingAttendance.attendance_status == "Absent")
return h_meetings
@service_cache(maxsize=128)
def req_cm(member):
# Get the number of required committee meetings based on if the member
# is going on co-op in the current operating session.
co_op = CurrentCoops.query.filter(
CurrentCoops.uid == member.uid,
CurrentCoops.date_created > start_of_year()).first()
if co_op:
return 15
return 30
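# Illustrative helper (not part of the original module): one way the functions above
# could be combined to report committee-meeting progress for a member object that
# exposes a ``uid`` attribute.
def committee_meeting_progress(member):
    attended = len(get_cm(member))   # approved committee meetings attended this year
    required = req_cm(member)        # 15 when on co-op this operating year, else 30
    return min(attended / required, 1.0)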
| 37.969136 | 98 | 0.688506 | from datetime import datetime
from conditional import start_of_year
from conditional.models.models import CommitteeMeeting
from conditional.models.models import CurrentCoops
from conditional.models.models import FreshmanEvalData
from conditional.models.models import HouseMeeting
from conditional.models.models import MemberCommitteeAttendance
from conditional.models.models import MemberHouseMeetingAttendance
from conditional.models.models import MemberSeminarAttendance
from conditional.models.models import TechnicalSeminar
from conditional.util.cache import service_cache
from conditional.util.ldap import ldap_get_active_members
from conditional.util.ldap import ldap_get_current_students
from conditional.util.ldap import ldap_get_intro_members
from conditional.util.ldap import ldap_get_onfloor_members
from conditional.util.ldap import ldap_get_roomnumber
from conditional.util.ldap import ldap_is_active
from conditional.util.ldap import ldap_is_onfloor
@service_cache(maxsize=1024)
def get_voting_members():
if datetime.today() < datetime(start_of_year().year, 12, 31):
semester = 'Fall'
else:
semester = 'Spring'
active_members = set(member.uid for member in ldap_get_active_members())
intro_members = set(member.uid for member in ldap_get_intro_members())
on_coop = set(member.uid for member in CurrentCoops.query.filter(
CurrentCoops.date_created > start_of_year(),
CurrentCoops.semester == semester).all())
voting_list = list(active_members - intro_members - on_coop)
passed_fall = FreshmanEvalData.query.filter(
FreshmanEvalData.freshman_eval_result == "Passed",
FreshmanEvalData.eval_date > start_of_year()
).distinct()
for intro_member in passed_fall:
if intro_member.uid not in voting_list:
voting_list.append(intro_member.uid)
return voting_list
@service_cache(maxsize=1024)
def get_members_info():
members = ldap_get_current_students()
member_list = []
for account in members:
uid = account.uid
name = account.cn
active = ldap_is_active(account)
onfloor = ldap_is_onfloor(account)
room = ldap_get_roomnumber(account)
hp = account.housingPoints
member_list.append({
"uid": uid,
"name": name,
"active": active,
"onfloor": onfloor,
"room": room,
"hp": hp
})
return member_list
def get_freshman_data(user_name):
freshman = {}
freshman_data = FreshmanEvalData.query.filter(FreshmanEvalData.uid == user_name).first()
if freshman_data is None:
return None
freshman['status'] = freshman_data.freshman_eval_result
c_meetings = [m.meeting_id for m in
MemberCommitteeAttendance.query.filter(
MemberCommitteeAttendance.uid == user_name
) if CommitteeMeeting.query.filter(
CommitteeMeeting.id == m.meeting_id).first().approved]
freshman['committee_meetings'] = len(c_meetings)
t_seminars = [s.seminar_id for s in
MemberSeminarAttendance.query.filter(
MemberSeminarAttendance.uid == user_name
) if TechnicalSeminar.query.filter(
TechnicalSeminar.id == s.seminar_id).first().approved]
freshman['ts_total'] = len(t_seminars)
attendance = [m.name for m in TechnicalSeminar.query.filter(
TechnicalSeminar.id.in_(t_seminars)
)]
freshman['ts_list'] = attendance
h_meetings = [(m.meeting_id, m.attendance_status) for m in
MemberHouseMeetingAttendance.query.filter(
MemberHouseMeetingAttendance.uid == user_name)]
freshman['hm_missed'] = len([h for h in h_meetings if h[1] == "Absent"])
freshman['social_events'] = freshman_data.social_events
freshman['general_comments'] = freshman_data.other_notes
freshman['sig_missed'] = freshman_data.signatures_missed
freshman['eval_date'] = freshman_data.eval_date
return freshman
@service_cache(maxsize=1024)
def get_onfloor_members():
return [uid for uid in [members.uid for members in ldap_get_active_members()]
if uid in [members.uid for members in ldap_get_onfloor_members()]]
def get_cm(member):
c_meetings = [{
"uid": cm.uid,
"timestamp": cm.timestamp,
"committee": cm.committee
} for cm in CommitteeMeeting.query.join(
MemberCommitteeAttendance,
MemberCommitteeAttendance.meeting_id == CommitteeMeeting.id
).with_entities(
MemberCommitteeAttendance.uid,
CommitteeMeeting.timestamp,
CommitteeMeeting.committee
).filter(
CommitteeMeeting.timestamp > start_of_year(),
MemberCommitteeAttendance.uid == member.uid,
CommitteeMeeting.approved == True ).all()]
return c_meetings
def get_hm(member, only_absent=False):
h_meetings = MemberHouseMeetingAttendance.query.outerjoin(
HouseMeeting,
MemberHouseMeetingAttendance.meeting_id == HouseMeeting.id).with_entities(
MemberHouseMeetingAttendance.meeting_id,
MemberHouseMeetingAttendance.attendance_status,
HouseMeeting.date).filter(
HouseMeeting.date > start_of_year(),
MemberHouseMeetingAttendance.uid == member.uid)
if only_absent:
h_meetings = h_meetings.filter(MemberHouseMeetingAttendance.attendance_status == "Absent")
return h_meetings
@service_cache(maxsize=128)
def req_cm(member):
co_op = CurrentCoops.query.filter(
CurrentCoops.uid == member.uid,
CurrentCoops.date_created > start_of_year()).first()
if co_op:
return 15
return 30
| true | true |
galois/_fields/_main.py | iyanmv/galois | MIT
"""
A module that contains the main classes for Galois fields -- FieldClass, FieldArray,
and Poly. They're all in one file because they have circular dependencies. The specific GF2
FieldClass is also included.
"""
import inspect
import math
import random
from typing import Tuple, List, Sequence, Iterable, Optional, Union
from typing_extensions import Literal
import numba
import numpy as np
from .._factor import divisors
from .._overrides import set_module
from .._poly_conversion import integer_to_poly, poly_to_integer, str_to_integer, poly_to_str, sparse_poly_to_integer, sparse_poly_to_str, str_to_sparse_poly
from ._dtypes import DTYPES
from ._linalg import dot, row_reduce, lu_decompose, lup_decompose
from ._functions import FunctionMeta
from ._ufuncs import UfuncMeta
__all__ = ["FieldClass", "FieldArray", "GF2", "Poly"]
###############################################################################
# NumPy ndarray subclass for Galois fields
###############################################################################
@set_module("galois")
class FieldClass(FunctionMeta, UfuncMeta):
"""
Defines a metaclass for all :obj:`galois.FieldArray` classes.
Important
---------
:obj:`galois.FieldClass` is a metaclass for :obj:`galois.FieldArray` subclasses created with the class factory
:func:`galois.GF` and should not be instantiated directly. This metaclass gives :obj:`galois.FieldArray` subclasses
methods and attributes related to their Galois fields.
This class is included in the API to allow the user to test if a class is a Galois field array class.
.. ipython:: python
GF = galois.GF(7)
isinstance(GF, galois.FieldClass)
"""
# pylint: disable=no-value-for-parameter,unsupported-membership-test,abstract-method,too-many-public-methods
def __new__(cls, name, bases, namespace, **kwargs): # pylint: disable=unused-argument
return super().__new__(cls, name, bases, namespace)
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
cls._characteristic = kwargs.get("characteristic", 0)
cls._degree = kwargs.get("degree", 0)
cls._order = kwargs.get("order", 0)
cls._order_str = None
cls._ufunc_mode = None
cls._ufunc_target = None
cls._dtypes = cls._determine_dtypes()
if "irreducible_poly" in kwargs:
cls._irreducible_poly = kwargs["irreducible_poly"]
cls._irreducible_poly_int = cls._irreducible_poly.integer
else:
cls._irreducible_poly = None
cls._irreducible_poly_int = 0
cls._primitive_element = kwargs.get("primitive_element", None)
cls._is_primitive_poly = kwargs.get("is_primitive_poly", None)
cls._prime_subfield = None
cls._display_mode = "int"
if cls.degree == 1:
cls._order_str = f"order={cls.order}"
else:
cls._order_str = f"order={cls.characteristic}^{cls.degree}"
def __str__(cls):
return f"<class 'numpy.ndarray over {cls.name}'>"
def __repr__(cls):
return str(cls)
###############################################################################
# Helper methods
###############################################################################
def _determine_dtypes(cls):
"""
At a minimum, valid dtypes are ones that can hold x for x in [0, order).
"""
dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= cls.order - 1]
if len(dtypes) == 0:
dtypes = [np.object_]
return dtypes
###############################################################################
# Class methods
###############################################################################
def compile(cls, mode: str):
"""
Recompile the just-in-time compiled numba ufuncs for a new calculation mode.
This function updates :obj:`ufunc_mode`.
Parameters
----------
mode : str
The ufunc calculation mode.
* `"auto"`: Selects "jit-lookup" for fields with order less than :math:`2^{20}`, "jit-calculate" for larger fields, and "python-calculate"
for fields whose elements cannot be represented with :obj:`numpy.int64`.
* `"jit-lookup"`: JIT compiles arithmetic ufuncs to use Zech log, log, and anti-log lookup tables for efficient computation.
In the few cases where explicit calculation is faster than table lookup, explicit calculation is used.
* `"jit-calculate"`: JIT compiles arithmetic ufuncs to use explicit calculation. The "jit-calculate" mode is designed for large
fields that cannot or should not store lookup tables in RAM. Generally, the "jit-calculate" mode is slower than "jit-lookup".
* `"python-calculate"`: Uses pure-python ufuncs with explicit calculation. This is reserved for fields whose elements cannot be
represented with :obj:`numpy.int64` and instead use :obj:`numpy.object_` with python :obj:`int` (which has arbitrary precision).
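        Examples
        --------
        For example, one could switch a small field from lookup-table arithmetic to explicit
        calculation and back again (an illustrative sketch; all names used are from this class's API).
        .. ipython:: python
            GF = galois.GF(2**8)
            GF.ufunc_mode
            GF.compile("jit-calculate")
            GF.ufunc_mode
            GF.compile("auto")
            GF.ufunc_mode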
"""
if not isinstance(mode, (type(None), str)):
raise TypeError(f"Argument `mode` must be a string, not {type(mode)}.")
# if not mode in ["auto", "jit-lookup", "jit-calculate", "python-calculate"]:
# raise ValueError(f"Argument `mode` must be in ['auto', 'jit-lookup', 'jit-calculate', 'python-calculate'], not {mode!r}.")
mode = cls.default_ufunc_mode if mode == "auto" else mode
if mode not in cls.ufunc_modes:
raise ValueError(f"Argument `mode` must be in {cls.ufunc_modes} for {cls.name}, not {mode!r}.")
if mode == cls.ufunc_mode:
# Don't need to rebuild these ufuncs
return
cls._ufunc_mode = mode
cls._compile_ufuncs()
def display(
cls,
mode: Literal["int", "poly", "power"] = "int"
) -> "DisplayContext":
r"""
Sets the display mode for all Galois field arrays of this type.
The display mode can be set to either the integer representation, polynomial representation, or power
representation. This function updates :obj:`display_mode`.
Warning
-------
For the power representation, :func:`np.log` is computed on each element. So for large fields without lookup
tables, displaying arrays in the power representation may take longer than expected.
Parameters
----------
mode : str, optional
The field element representation.
* `"int"` (default): The element displayed as the integer representation of the polynomial. For example, :math:`2x^2 + x + 2` is an element of
:math:`\mathrm{GF}(3^3)` and is equivalent to the integer :math:`23 = 2 \cdot 3^2 + 3 + 2`.
* `"poly"`: The element as a polynomial over :math:`\mathrm{GF}(p)` of degree less than :math:`m`. For example, :math:`2x^2 + x + 2` is an element
of :math:`\mathrm{GF}(3^3)`.
* `"power"`: The element as a power of the primitive element, see :obj:`FieldClass.primitive_element`. For example, :math:`2x^2 + x + 2 = \alpha^5`
in :math:`\mathrm{GF}(3^3)` with irreducible polynomial :math:`x^3 + 2x + 1` and primitive element :math:`\alpha = x`.
Returns
-------
DisplayContext
A context manager for use in a `with` statement. If permanently setting the display mode, disregard the
return value.
Examples
--------
Change the display mode by calling the :func:`display` method.
.. ipython:: python
GF = galois.GF(3**3)
print(GF.properties)
a = GF(23); a
# Permanently set the display mode to the polynomial representation
GF.display("poly"); a
# Permanently set the display mode to the power representation
GF.display("power"); a
# Permanently reset the default display mode to the integer representation
GF.display(); a
The :func:`display` method can also be used as a context manager, as shown below.
For the polynomial representation, when the primitive element is :math:`\alpha = x` in :math:`\mathrm{GF}(p)[x]` the polynomial
indeterminate used is :math:`\alpha`.
.. ipython:: python
GF = galois.GF(2**8)
print(GF.properties)
a = GF.Random()
print(GF.display_mode, a)
with GF.display("poly"):
print(GF.display_mode, a)
with GF.display("power"):
print(GF.display_mode, a)
# The display mode is reset after exiting the context manager
print(GF.display_mode, a)
But when the primitive element is :math:`\alpha \ne x` in :math:`\mathrm{GF}(p)[x]`, the polynomial
indeterminate used is :math:`x`.
.. ipython:: python
GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
print(GF.properties)
a = GF.Random()
print(GF.display_mode, a)
with GF.display("poly"):
print(GF.display_mode, a)
with GF.display("power"):
print(GF.display_mode, a)
# The display mode is reset after exiting the context manager
print(GF.display_mode, a)
"""
if not isinstance(mode, (type(None), str)):
raise TypeError(f"Argument `mode` must be a string, not {type(mode)}.")
if mode not in ["int", "poly", "power"]:
raise ValueError(f"Argument `mode` must be in ['int', 'poly', 'power'], not {mode!r}.")
context = DisplayContext(cls)
cls._display_mode = mode # Set the new state
return context
def repr_table(
cls,
primitive_element: Optional[Union[int, str, np.ndarray, "FieldArray"]] = None,
sort: Literal["power", "poly", "vector", "int"] = "power"
) -> str:
r"""
Generates a field element representation table comparing the power, polynomial, vector, and integer representations.
Parameters
----------
primitive_element : int, str, np.ndarray, galois.FieldArray, optional
The primitive element to use for the power representation. The default is `None` which uses the field's
default primitive element, :obj:`primitive_element`. If an array, it must be a 0-D array.
sort : str, optional
The sorting method for the table, either `"power"` (default), `"poly"`, `"vector"`, or `"int"`. Sorting by "power" will order
the rows of the table by ascending powers of the primitive element. Sorting by any of the others will order the rows in
lexicographically-increasing polynomial/vector order, which is equivalent to ascending order of the integer representation.
Returns
-------
str
A UTF-8 formatted table comparing the power, polynomial, vector, and integer representations of each
field element.
Examples
--------
.. ipython:: python
GF = galois.GF(2**4)
print(GF.properties)
Generate a representation table for :math:`\mathrm{GF}(2^4)`. Since :math:`x^4 + x + 1` is a primitive polynomial,
:math:`x` is a primitive element of the field. Notice, :math:`\textrm{ord}(x) = 15`.
.. ipython:: python
print(GF.repr_table())
Generate a representation table for :math:`\mathrm{GF}(2^4)` using a different primitive element :math:`x^3 + x^2 + x`.
Notice, :math:`\textrm{ord}(x^3 + x^2 + x) = 15`.
.. ipython:: python
alpha = GF.primitive_elements[-1]
print(GF.repr_table(alpha))
Generate a representation table for :math:`\mathrm{GF}(2^4)` using a non-primitive element :math:`x^3 + x^2`. Notice,
:math:`\textrm{ord}(x^3 + x^2) = 5 \ne 15`.
.. ipython:: python
beta = GF("x^3 + x^2")
print(GF.repr_table(beta))
"""
if sort not in ["power", "poly", "vector", "int"]:
raise ValueError(f"Argument `sort` must be in ['power', 'poly', 'vector', 'int'], not {sort!r}.")
if primitive_element is None:
primitive_element = cls.primitive_element
degrees = np.arange(0, cls.order - 1)
x = primitive_element**degrees
if sort != "power":
idxs = np.argsort(x)
degrees, x = degrees[idxs], x[idxs]
x = np.concatenate((np.atleast_1d(cls(0)), x)) # Add 0 = alpha**-Inf
prim = poly_to_str(integer_to_poly(primitive_element, cls.characteristic))
# Define print helper functions
if len(prim) > 1:
print_power = lambda power: "0" if power is None else f"({prim})^{power}"
else:
print_power = lambda power: "0" if power is None else f"{prim}^{power}"
print_poly = lambda x: poly_to_str(integer_to_poly(x, cls.characteristic))
print_vec = lambda x: str(integer_to_poly(x, cls.characteristic, degree=cls.degree-1))
print_int = lambda x: str(int(x))
# Determine column widths
N_power = max([len(print_power(max(degrees))), len("Power")]) + 2
N_poly = max([len(print_poly(e)) for e in x] + [len("Polynomial")]) + 2
N_vec = max([len(print_vec(e)) for e in x] + [len("Vector")]) + 2
N_int = max([len(print_int(e)) for e in x] + [len("Integer")]) + 2
# Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472
string = "╔" + "═"*N_power + "╤" + "═"*N_poly + "╤" + "═"*N_vec + "╤" + "═"*N_int + "╗"
string += "\n║" + "Power".center(N_power) + "│" + "Polynomial".center(N_poly) + "│" + "Vector".center(N_vec) + "│" + "Integer".center(N_int) + "║"
string += "\n║" + "═"*N_power + "╪" + "═"*N_poly + "╪" + "═"*N_vec + "╪" + "═"*N_int + "║"
for i in range(x.size):
d = None if i == 0 else degrees[i - 1]
string += "\n║" + print_power(d).center(N_power) + "│" + poly_to_str(integer_to_poly(x[i], cls.characteristic)).center(N_poly) + "│" + str(integer_to_poly(x[i], cls.characteristic, degree=cls.degree-1)).center(N_vec) + "│" + cls._print_int(x[i]).center(N_int) + "║"
if i < x.size - 1:
string += "\n╟" + "─"*N_power + "┼" + "─"*N_poly + "┼" + "─"*N_vec + "┼" + "─"*N_int + "╢"
string += "\n╚" + "═"*N_power + "╧" + "═"*N_poly + "╧"+ "═"*N_vec + "╧" + "═"*N_int + "╝"
return string
def arithmetic_table(
cls,
operation: Literal["+", "-", "*", "/"],
x: Optional["FieldArray"] = None,
y: Optional["FieldArray"] = None
) -> str:
r"""
Generates the specified arithmetic table for the Galois field.
Parameters
----------
operation : str
The arithmetic operation, either `"+"`, `"-"`, `"*"`, or `"/"`.
x : galois.FieldArray, optional
Optionally specify the :math:`x` values for the arithmetic table. The default is `None`
which represents :math:`\{0, \dots, p^m - 1\}`.
y : galois.FieldArray, optional
Optionally specify the :math:`y` values for the arithmetic table. The default is `None`
which represents :math:`\{0, \dots, p^m - 1\}` for addition, subtraction, and multiplication and
:math:`\{1, \dots, p^m - 1\}` for division.
Returns
-------
str
A UTF-8 formatted arithmetic table.
Examples
--------
.. ipython:: python
GF = galois.GF(3**2)
print(GF.arithmetic_table("+"))
.. ipython:: python
GF.display("poly");
print(GF.arithmetic_table("+"))
.. ipython:: python
GF.display("power");
print(GF.arithmetic_table("+"))
.. ipython:: python
GF.display("poly");
x = GF.Random(5); x
y = GF.Random(3); y
print(GF.arithmetic_table("+", x=x, y=y))
GF.display();
"""
if not operation in ["+", "-", "*", "/"]:
raise ValueError(f"Argument `operation` must be in ['+', '-', '*', '/'], not {operation!r}.")
if cls.display_mode == "power":
# Order elements by powers of the primitive element
x_default = np.concatenate((np.atleast_1d(cls(0)), cls.primitive_element**np.arange(0, cls.order - 1, dtype=cls.dtypes[-1])))
else:
x_default = cls.Elements()
y_default = x_default if operation != "/" else x_default[1:]
x = x_default if x is None else cls(x)
y = y_default if y is None else cls(y)
X, Y = np.meshgrid(x, y, indexing="ij")
if operation == "+":
Z = X + Y
elif operation == "-":
Z = X - Y
elif operation == "*":
Z = X * Y
else:
Z = X / Y
if cls.display_mode == "int":
print_element = cls._print_int
elif cls.display_mode == "poly":
print_element = cls._print_poly
else:
cls._set_print_power_vars(x)
print_element = cls._print_power
operation_str = f"x {operation} y"
N = max([len(print_element(e)) for e in x]) + 2
N_left = max(N, len(operation_str) + 2)
# Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472
string = "╔" + "═"*N_left + "╦" + ("═"*N + "╤")*(y.size - 1) + "═"*N + "╗"
string += "\n║" + operation_str.rjust(N_left - 1) + " ║"
for j in range(y.size):
string += print_element(y[j]).center(N)
string += "│" if j < y.size - 1 else "║"
string += "\n╠" + "═"*N_left + "╬" + ("═"*N + "╪")*(y.size - 1) + "═"*N + "╣"
for i in range(x.size):
string += "\n║" + print_element(x[i]).rjust(N_left - 1) + " ║"
for j in range(y.size):
string += print_element(Z[i,j]).center(N)
string += "│" if j < y.size - 1 else "║"
if i < x.size - 1:
string += "\n╟" + "─"*N_left + "╫" + ("─"*N + "┼")*(y.size - 1) + "─"*N + "╢"
string += "\n╚" + "═"*N_left + "╩" + ("═"*N + "╧")*(y.size - 1) + "═"*N + "╝"
return string
###############################################################################
# Array display methods
###############################################################################
def _formatter(cls, array):
# pylint: disable=attribute-defined-outside-init
formatter = {}
if cls.display_mode == "poly":
formatter["int"] = cls._print_poly
formatter["object"] = cls._print_poly
elif cls.display_mode == "power":
cls._set_print_power_vars(array)
formatter["int"] = cls._print_power
formatter["object"] = cls._print_power
elif array.dtype == np.object_:
formatter["object"] = cls._print_int
return formatter
def _print_int(cls, element): # pylint: disable=no-self-use
return f"{int(element)}"
def _print_poly(cls, element):
poly = integer_to_poly(element, cls.characteristic)
poly_var = "α" if cls.primitive_element == cls.characteristic else "x"
return poly_to_str(poly, poly_var=poly_var)
def _set_print_power_vars(cls, array):
nonzero_idxs = np.nonzero(array)
if array.ndim > 1:
max_power = np.max(cls._ufunc("log")(array[nonzero_idxs], cls.primitive_element))
if max_power > 1:
cls._display_power_width = 2 + len(str(max_power))
else:
cls._display_power_width = 1
else:
cls._display_power_width = None
def _print_power(cls, element):
if element == 0:
s = "0"
else:
power = cls._ufunc("log")(element, cls.primitive_element)
if power > 1:
s = f"α^{power}"
elif power == 1:
s = "α"
else:
s = "1"
if cls._display_power_width:
return s.rjust(cls._display_power_width)
else:
return s
###############################################################################
# Class attributes
###############################################################################
@property
def name(cls) -> str:
"""
str: The Galois field name.
Examples
--------
.. ipython:: python
galois.GF(2).name
galois.GF(2**8).name
galois.GF(31).name
galois.GF(7**5).name
"""
if cls._degree == 1:
return f"GF({cls._characteristic})"
else:
return f"GF({cls._characteristic}^{cls._degree})"
@property
def characteristic(cls) -> int:
r"""
int: The prime characteristic :math:`p` of the Galois field :math:`\mathrm{GF}(p^m)`. Adding
:math:`p` copies of any element will always result in :math:`0`.
Examples
--------
.. ipython:: python
GF = galois.GF(2**8, display="poly")
GF.characteristic
a = GF.Random(low=1); a
a * GF.characteristic
@suppress
GF.display();
.. ipython:: python
GF = galois.GF(31)
GF.characteristic
a = GF.Random(low=1); a
a * GF.characteristic
"""
return cls._characteristic
@property
def degree(cls) -> int:
r"""
int: The prime characteristic's degree :math:`m` of the Galois field :math:`\mathrm{GF}(p^m)`. The degree
is a positive integer.
Examples
--------
.. ipython:: python
galois.GF(2).degree
galois.GF(2**8).degree
galois.GF(31).degree
galois.GF(7**5).degree
"""
return cls._degree
@property
def order(cls) -> int:
r"""
int: The order :math:`p^m` of the Galois field :math:`\mathrm{GF}(p^m)`. The order of the field is also equal to
the field's size.
Examples
--------
.. ipython:: python
galois.GF(2).order
galois.GF(2**8).order
galois.GF(31).order
galois.GF(7**5).order
"""
return cls._order
@property
def irreducible_poly(cls) -> "Poly":
r"""
galois.Poly: The irreducible polynomial :math:`f(x)` of the Galois field :math:`\mathrm{GF}(p^m)`. The irreducible
polynomial is of degree :math:`m` over :math:`\mathrm{GF}(p)`.
Examples
--------
.. ipython:: python
galois.GF(2).irreducible_poly
galois.GF(2**8).irreducible_poly
galois.GF(31).irreducible_poly
galois.GF(7**5).irreducible_poly
"""
# Ensure accesses of this property don't alter it
return cls._irreducible_poly.copy()
@property
def is_primitive_poly(cls) -> bool:
r"""
bool: Indicates whether the :obj:`irreducible_poly` is a primitive polynomial. If so, :math:`x` is a primitive element
of the Galois field.
Examples
--------
.. ipython:: python
GF = galois.GF(2**8, display="poly")
GF.irreducible_poly
GF.primitive_element
# The irreducible polynomial is a primitive polynomial if the primitive element is a root
GF.irreducible_poly(GF.primitive_element, field=GF)
GF.is_primitive_poly
@suppress
GF.display();
Here is an example using the :math:`\mathrm{GF}(2^8)` field from AES, which does not use a primitive polynomial.
.. ipython:: python
GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]), display="poly")
GF.irreducible_poly
GF.primitive_element
# The irreducible polynomial is a primitive polynomial if the primitive element is a root
GF.irreducible_poly(GF.primitive_element, field=GF)
GF.is_primitive_poly
@suppress
GF.display();
"""
return cls._is_primitive_poly
@property
def primitive_element(cls) -> "FieldArray":
r"""
galois.FieldArray: A primitive element :math:`\alpha` of the Galois field :math:`\mathrm{GF}(p^m)`. A primitive element is a multiplicative
generator of the field, such that :math:`\mathrm{GF}(p^m) = \{0, 1, \alpha, \alpha^2, \dots, \alpha^{p^m - 2}\}`.
A primitive element is a root of the primitive polynomial :math:`f(x)`, such that :math:`f(\alpha) = 0` over
:math:`\mathrm{GF}(p^m)`.
Examples
--------
.. ipython:: python
galois.GF(2).primitive_element
galois.GF(2**8).primitive_element
galois.GF(31).primitive_element
galois.GF(7**5).primitive_element
"""
        # Ensure accesses of this property don't alter it
return cls(cls._primitive_element) # pylint: disable=no-value-for-parameter
@property
def primitive_elements(cls) -> "FieldArray":
r"""
galois.FieldArray: All primitive elements :math:`\alpha` of the Galois field :math:`\mathrm{GF}(p^m)`. A primitive element is a multiplicative
generator of the field, such that :math:`\mathrm{GF}(p^m) = \{0, 1, \alpha, \alpha^2, \dots, \alpha^{p^m - 2}\}`.
Examples
--------
.. ipython:: python
galois.GF(2).primitive_elements
galois.GF(2**8).primitive_elements
galois.GF(31).primitive_elements
galois.GF(7**5).primitive_elements
"""
n = cls.order - 1
totatives = [t for t in range(1, n + 1) if math.gcd(n, t) == 1]
powers = np.array(totatives)
return np.sort(cls.primitive_element ** powers)
@property
def quadratic_residues(cls) -> "FieldArray":
r"""
galois.FieldArray: All quadratic residues in the Galois field.
An element :math:`x` in :math:`\mathrm{GF}(p^m)` is a *quadratic residue* if there exists a :math:`y` such that
:math:`y^2 = x` in the field.
In fields with characteristic 2, every element is a quadratic residue. In fields with characteristic greater than 2,
exactly half of the nonzero elements are quadratic residues (and they have two unique square roots).
See also :func:`FieldArray.is_quadratic_residue`.
Examples
--------
.. ipython:: python
GF = galois.GF(11)
x = GF.quadratic_residues; x
r = np.sqrt(x)
r, -r
r**2
(-r)**2
.. ipython:: python
GF = galois.GF(2**4)
x = GF.quadratic_residues; x
r = np.sqrt(x)
r, -r
r**2
(-r)**2
"""
x = cls.Elements()
is_quadratic_residue = x.is_quadratic_residue()
return x[is_quadratic_residue]
@property
def quadratic_non_residues(cls) -> "FieldArray":
r"""
galois.FieldArray: All quadratic non-residues in the Galois field.
An element :math:`x` in :math:`\mathrm{GF}(p^m)` is a *quadratic non-residue* if there does not exist a :math:`y` such that
:math:`y^2 = x` in the field.
In fields with characteristic 2, no elements are quadratic non-residues. In fields with characteristic greater than 2,
exactly half of the nonzero elements are quadratic non-residues.
See also :func:`FieldArray.is_quadratic_residue`.
Examples
--------
.. ipython:: python
GF = galois.GF(11)
GF.quadratic_non_residues
.. ipython:: python
GF = galois.GF(2**4)
GF.quadratic_non_residues
"""
x = cls.Elements()
is_quadratic_residue = x.is_quadratic_residue()
return x[~is_quadratic_residue]
@property
def is_prime_field(cls) -> bool:
"""
bool: Indicates if the field's order is prime.
Examples
--------
.. ipython:: python
galois.GF(2).is_prime_field
galois.GF(2**8).is_prime_field
galois.GF(31).is_prime_field
galois.GF(7**5).is_prime_field
"""
return cls._degree == 1
@property
def is_extension_field(cls) -> bool:
"""
bool: Indicates if the field's order is a prime power.
Examples
--------
.. ipython:: python
galois.GF(2).is_extension_field
galois.GF(2**8).is_extension_field
galois.GF(31).is_extension_field
galois.GF(7**5).is_extension_field
"""
return cls._degree > 1
@property
def prime_subfield(cls) -> "FieldClass":
r"""
galois.FieldClass: The prime subfield :math:`\mathrm{GF}(p)` of the extension field :math:`\mathrm{GF}(p^m)`.
Examples
--------
.. ipython:: python
print(galois.GF(2).prime_subfield.properties)
print(galois.GF(2**8).prime_subfield.properties)
print(galois.GF(31).prime_subfield.properties)
print(galois.GF(7**5).prime_subfield.properties)
"""
return cls._prime_subfield
@property
def dtypes(cls) -> List[np.dtype]:
"""
list: List of valid integer :obj:`numpy.dtype` values that are compatible with this Galois field. Creating an array with an
unsupported dtype will throw a `TypeError` exception.
Examples
--------
.. ipython:: python
GF = galois.GF(2); GF.dtypes
GF = galois.GF(2**8); GF.dtypes
GF = galois.GF(31); GF.dtypes
GF = galois.GF(7**5); GF.dtypes
For Galois fields that cannot be represented by :obj:`numpy.int64`, the only valid dtype is :obj:`numpy.object_`.
.. ipython:: python
GF = galois.GF(2**100); GF.dtypes
GF = galois.GF(36893488147419103183); GF.dtypes
"""
return cls._dtypes
@property
def display_mode(cls) -> str:
r"""
str: The representation of Galois field elements, either `"int"`, `"poly"`, or `"power"`. This can be
changed with :func:`display`.
Examples
--------
For the polynomial representation, when the primitive element is :math:`\alpha = x` in :math:`\mathrm{GF}(p)[x]` the polynomial
indeterminate used is :math:`\alpha`.
.. ipython:: python
GF = galois.GF(2**8)
print(GF.properties)
a = GF.Random()
print(GF.display_mode, a)
with GF.display("poly"):
print(GF.display_mode, a)
with GF.display("power"):
print(GF.display_mode, a)
# The display mode is reset after exiting the context manager
print(GF.display_mode, a)
But when the primitive element is :math:`\alpha \ne x` in :math:`\mathrm{GF}(p)[x]`, the polynomial
indeterminate used is :math:`x`.
.. ipython:: python
GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
print(GF.properties)
a = GF.Random()
print(GF.display_mode, a)
with GF.display("poly"):
print(GF.display_mode, a)
with GF.display("power"):
print(GF.display_mode, a)
# The display mode is reset after exiting the context manager
print(GF.display_mode, a)
The power representation displays elements as powers of :math:`\alpha` the primitive element, see
:obj:`FieldClass.primitive_element`.
.. ipython:: python
with GF.display("power"):
print(GF.display_mode, a)
# The display mode is reset after exiting the context manager
print(GF.display_mode, a)
"""
return cls._display_mode
@property
def ufunc_mode(cls) -> str:
"""
str: The mode for ufunc compilation, either `"jit-lookup"`, `"jit-calculate"`, or `"python-calculate"`.
Examples
--------
.. ipython:: python
galois.GF(2).ufunc_mode
galois.GF(2**8).ufunc_mode
galois.GF(31).ufunc_mode
galois.GF(7**5).ufunc_mode
"""
return cls._ufunc_mode
@property
def ufunc_modes(cls) -> List[str]:
"""
list: All supported ufunc modes for this Galois field array class.
Examples
--------
.. ipython:: python
galois.GF(2).ufunc_modes
galois.GF(2**8).ufunc_modes
galois.GF(31).ufunc_modes
galois.GF(2**100).ufunc_modes
"""
if cls.dtypes == [np.object_]:
return ["python-calculate"]
else:
return ["jit-lookup", "jit-calculate"]
@property
def default_ufunc_mode(cls) -> str:
"""
str: The default ufunc arithmetic mode for this Galois field.
Examples
--------
.. ipython:: python
galois.GF(2).default_ufunc_mode
galois.GF(2**8).default_ufunc_mode
galois.GF(31).default_ufunc_mode
galois.GF(2**100).default_ufunc_mode
"""
if cls.dtypes == [np.object_]:
return "python-calculate"
elif cls.order <= 2**20:
return "jit-lookup"
else:
return "jit-calculate"
@property
def properties(cls) -> str:
"""
str: A formatted string displaying relevant properties of the Galois field.
Examples
--------
.. ipython:: python
GF = galois.GF(2); print(GF.properties)
GF = galois.GF(2**8); print(GF.properties)
GF = galois.GF(31); print(GF.properties)
GF = galois.GF(7**5); print(GF.properties)
"""
string = f"{cls.name}:"
string += f"\n characteristic: {cls.characteristic}"
string += f"\n degree: {cls.degree}"
string += f"\n order: {cls.order}"
string += f"\n irreducible_poly: {cls.irreducible_poly.string}"
string += f"\n is_primitive_poly: {cls.is_primitive_poly}"
string += f"\n primitive_element: {poly_to_str(integer_to_poly(cls.primitive_element, cls.characteristic))}"
return string
class DirMeta(type):
"""
A mixin metaclass that overrides __dir__ so that dir() and tab-completion in ipython of `FieldArray` classes
(which are `FieldClass` instances) include the methods and properties from the metaclass. Python does not
natively include metaclass properties in dir().
This is a separate class because it will be mixed in to `GF2Meta`, `GF2mMeta`, `GFpMeta`, and `GFpmMeta` separately. Otherwise, the
sphinx documentation of `FieldArray` gets messed up.
Also, to not mess up the sphinx documentation of `GF2`, we had to create a custom sphinx template `class_gf2.rst` that
manually includes all the classmethods and methods. This is because there is no way to redefine __dir__ for `GF2` and not have
sphinx get confused when using autoclass.
"""
def __dir__(cls):
if isinstance(cls, FieldClass):
meta_dir = dir(type(cls))
classmethods = [attribute for attribute in super().__dir__() if attribute[0] != "_" and inspect.ismethod(getattr(cls, attribute))]
return sorted(meta_dir + classmethods)
else:
return super().__dir__()
class DisplayContext:
"""
Simple context manager for the :obj:`FieldClass.display` method.
"""
def __init__(self, cls):
# Save the previous state
self.cls = cls
self.mode = cls.display_mode
def __enter__(self):
# Don't need to do anything, we already set the new mode in the display() method
pass
def __exit__(self, exc_type, exc_value, traceback):
        # Reset the mode upon exiting the context manager
self.cls._display_mode = self.mode
###############################################################################
# NumPy arrays over Galois fields
###############################################################################
@set_module("galois")
class FieldArray(np.ndarray, metaclass=FieldClass):
r"""
An array over :math:`\mathrm{GF}(p^m)`.
Important
---------
:obj:`galois.FieldArray` is an abstract base class for all Galois field array classes and cannot be instantiated
directly. Instead, :obj:`galois.FieldArray` subclasses are created using the class factory :func:`galois.GF`.
This class is included in the API to allow the user to test if an array is a Galois field array subclass.
.. ipython:: python
GF = galois.GF(7)
issubclass(GF, galois.FieldArray)
x = GF([1,2,3]); x
isinstance(x, galois.FieldArray)
Notes
-----
:obj:`galois.FieldArray` is an abstract base class and cannot be instantiated directly. Instead, the user creates a :obj:`galois.FieldArray`
subclass for the field :math:`\mathrm{GF}(p^m)` by calling the class factory :func:`galois.GF`, e.g. `GF = galois.GF(p**m)`. In this case,
`GF` is a subclass of :obj:`galois.FieldArray` and an instance of :obj:`galois.FieldClass`, a metaclass that defines special methods and attributes
related to the Galois field.
:obj:`galois.FieldArray`, and `GF`, is a subclass of :obj:`numpy.ndarray` and its constructor `x = GF(array_like)` has the same syntax as
:func:`numpy.array`. The returned :obj:`galois.FieldArray` instance `x` is a :obj:`numpy.ndarray` that is acted upon like any other
numpy array, except all arithmetic is performed in :math:`\mathrm{GF}(p^m)` not in :math:`\mathbb{Z}` or :math:`\mathbb{R}`.
Examples
--------
Construct the Galois field class for :math:`\mathrm{GF}(2^8)` using the class factory :func:`galois.GF` and then display
some relevant properties of the field. See :obj:`galois.FieldClass` for a complete list of Galois field array class
methods and attributes.
.. ipython:: python
GF256 = galois.GF(2**8)
GF256
print(GF256.properties)
Depending on the field's order, only certain numpy dtypes are supported. See :obj:`galois.FieldClass.dtypes` for more details.
.. ipython:: python
GF256.dtypes
Galois field arrays can be created from existing numpy arrays.
.. ipython:: python
x = np.array([155, 232, 162, 159, 63, 29, 247, 141, 75, 189], dtype=int)
# Explicit Galois field array creation -- a copy is performed
GF256(x)
# Or view an existing numpy array as a Galois field array -- no copy is performed
x.view(GF256)
Galois field arrays can also be created explicitly by converting an "array-like" object.
.. ipython:: python
# A scalar GF(2^8) element from its integer representation
GF256(37)
# A scalar GF(2^8) element from its polynomial representation
GF256("x^5 + x^2 + 1")
# A GF(2^8) array from a list of elements in their integer representation
GF256([[142, 27], [92, 253]])
# A GF(2^8) array from a list of elements in their integer and polynomial representations
GF256([[142, "x^5 + x^2 + 1"], [92, 253]])
There's also an alternate constructor :func:`Vector` (and accompanying :func:`vector` method) to convert an array of coefficients
over :math:`\mathrm{GF}(p)` with last dimension :math:`m` into Galois field elements in :math:`\mathrm{GF}(p^m)`.
.. ipython:: python
# A scalar GF(2^8) element from its vector representation
GF256.Vector([0, 0, 1, 0, 0, 1, 0, 1])
# A GF(2^8) array from a list of elements in their vector representation
GF256.Vector([[[1, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 1, 1]], [[0, 1, 0, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 0, 1]]])
Newly-created arrays will use the smallest unsigned dtype, unless otherwise specified.
.. ipython:: python
a = GF256([66, 166, 27, 182, 125]); a
a.dtype
b = GF256([66, 166, 27, 182, 125], dtype=np.int64); b
b.dtype
"""
# pylint: disable=unsupported-membership-test,not-an-iterable,too-many-public-methods
def __new__(
cls,
array: Union[int, str, Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None,
copy: bool = True,
order: Literal["K", "A", "C", "F"] = "K",
ndmin: int = 0
) -> "FieldArray":
if cls is FieldArray:
raise NotImplementedError("FieldArray is an abstract base class that cannot be directly instantiated. Instead, create a FieldArray subclass for GF(p^m) arithmetic using `GF = galois.GF(p**m)` and instantiate an array using `x = GF(array_like)`.")
return cls._array(array, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
def __init__(
self,
array: Union[int, str, Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None,
copy: bool = True,
order: Literal["K", "A", "C", "F"] = "K",
ndmin: int = 0
):
r"""
Creates an array over :math:`\mathrm{GF}(p^m)`.
Parameters
----------
array : int, str, tuple, list, numpy.ndarray, galois.FieldArray
            The input array-like object to be converted to a Galois field array. See the examples section for demonstrations of array creation
            using each input type. See :func:`galois.FieldClass.display` and :obj:`galois.FieldClass.display_mode` for a description of the
"integer" and "polynomial" representation of Galois field elements.
* :obj:`int`: A single integer, which is the "integer representation" of a Galois field element, creates a 0-D array.
* :obj:`str`: A single string, which is the "polynomial representation" of a Galois field element, creates a 0-D array.
* :obj:`tuple`, :obj:`list`: A list or tuple (or nested lists/tuples) of ints or strings (which can be mix-and-matched) creates an array of
Galois field elements from their integer or polynomial representations.
* :obj:`numpy.ndarray`, :obj:`galois.FieldArray`: An array of ints creates a copy of the array over this specific field.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
copy : bool, optional
The `copy` keyword argument from :func:`numpy.array`. The default is `True` which makes a copy of the input array.
order : str, optional
The `order` keyword argument from :func:`numpy.array`. Valid values are `"K"` (default), `"A"`, `"C"`, or `"F"`.
ndmin : int, optional
The `ndmin` keyword argument from :func:`numpy.array`. The minimum number of dimensions of the output.
The default is 0.
Returns
-------
galois.FieldArray
An array over :math:`\mathrm{GF}(p^m)`.
"""
# pylint: disable=unused-argument,super-init-not-called
# Adding __init__ and not doing anything is done to overwrite the superclass's __init__ docstring
return
@classmethod
def _get_dtype(cls, dtype):
if dtype is None:
return cls.dtypes[0]
# Convert "dtype" to a numpy dtype. This does platform specific conversion, if necessary.
# For example, np.dtype(int) == np.int64 (on some systems).
dtype = np.dtype(dtype)
if dtype not in cls.dtypes:
raise TypeError(f"{cls.name} arrays only support dtypes {[np.dtype(d).name for d in cls.dtypes]}, not {dtype.name!r}.")
return dtype
@classmethod
def _array(cls, array_like, dtype=None, copy=True, order="K", ndmin=0):
dtype = cls._get_dtype(dtype)
array_like = cls._check_array_like_object(array_like)
array = np.array(array_like, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
return array.view(cls)
@classmethod
def _check_array_like_object(cls, array_like):
if isinstance(array_like, cls):
# If this was a previously-created and vetted array, there's no need to reverify
return array_like
if isinstance(array_like, str):
# Convert the string to an integer and verify it's in range
array_like = cls._check_string_value(array_like)
cls._check_array_values(array_like)
elif isinstance(array_like, (int, np.integer)):
# Just check that the single int is in range
cls._check_array_values(array_like)
elif isinstance(array_like, (list, tuple)):
# Recursively check the items in the iterable to ensure they're of the correct type
# and that their values are in range
array_like = cls._check_iterable_types_and_values(array_like)
elif isinstance(array_like, np.ndarray):
# If this a NumPy array, but not a FieldArray, verify the array
if array_like.dtype == np.object_:
array_like = cls._check_array_types_dtype_object(array_like)
elif not np.issubdtype(array_like.dtype, np.integer):
raise TypeError(f"{cls.name} arrays must have integer dtypes, not {array_like.dtype}.")
cls._check_array_values(array_like)
else:
raise TypeError(f"{cls.name} arrays can be created with scalars of type int, not {type(array_like)}.")
return array_like
@classmethod
def _check_iterable_types_and_values(cls, iterable):
new_iterable = []
for item in iterable:
if isinstance(item, (list, tuple)):
item = cls._check_iterable_types_and_values(item)
new_iterable.append(item)
continue
if isinstance(item, str):
item = cls._check_string_value(item)
elif not isinstance(item, (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with an iterable, each element must be an integer. Found type {type(item)}.")
cls._check_array_values(item)
# if not 0 <= item < cls.order:
# raise ValueError(f"{cls.name} arrays must have elements in 0 <= x < {cls.order}, not {item}.")
# Ensure the type is int so dtype=object classes don't get all mixed up
new_iterable.append(int(item))
return new_iterable
@classmethod
def _check_array_types_dtype_object(cls, array):
if array.size == 0:
return array
if array.ndim == 0:
if not isinstance(array[()], (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. Found type {type(array[()])}.")
return int(array)
iterator = np.nditer(array, flags=["multi_index", "refs_ok"])
for _ in iterator:
a = array[iterator.multi_index]
if not isinstance(a, (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. Found type {type(a)}.")
# Ensure the type is int so dtype=object classes don't get all mixed up
array[iterator.multi_index] = int(a)
return array
@classmethod
def _check_array_values(cls, array):
if not isinstance(array, np.ndarray):
# Convert single integer to array so next step doesn't fail
array = np.array(array)
# Check the value of the "field elements" and make sure they are valid
if np.any(array < 0) or np.any(array >= cls.order):
idxs = np.logical_or(array < 0, array >= cls.order)
values = array if array.ndim == 0 else array[idxs]
raise ValueError(f"{cls.name} arrays must have elements in `0 <= x < {cls.order}`, not {values}.")
@classmethod
def _check_string_value(cls, string):
return str_to_integer(string, cls.prime_subfield)
###############################################################################
# Alternate constructors
###############################################################################
@classmethod
def Zeros(
cls,
shape: Union[int, Sequence[int]],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
"""
Creates a Galois field array with all zeros.
Parameters
----------
shape : int, tuple
A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.
A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.
`(M,N)`, represents a 2-D array with each element indicating the size in each dimension.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field array of zeros.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
GF.Zeros((2,5))
"""
dtype = cls._get_dtype(dtype)
array = np.zeros(shape, dtype=dtype)
return array.view(cls)
@classmethod
def Ones(
cls,
shape: Union[int, Sequence[int]],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
"""
Creates a Galois field array with all ones.
Parameters
----------
shape : int, tuple
A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.
A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.
`(M,N)`, represents a 2-D array with each element indicating the size in each dimension.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field array of ones.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
GF.Ones((2,5))
"""
dtype = cls._get_dtype(dtype)
array = np.ones(shape, dtype=dtype)
return array.view(cls)
@classmethod
def Range(
cls,
start: int,
stop: int,
step: Optional[int] = 1,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
"""
Creates a 1-D Galois field array with a range of field elements.
Parameters
----------
start : int
The starting Galois field value (inclusive) in its integer representation.
stop : int
The stopping Galois field value (exclusive) in its integer representation.
step : int, optional
The space between values. The default is 1.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A 1-D Galois field array of a range of field elements.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
GF.Range(10,20)
"""
if not stop <= cls.order:
raise ValueError(f"The stopping value must be less than the field order of {cls.order}, not {stop}.")
dtype = cls._get_dtype(dtype)
array = np.arange(start, stop, step=step, dtype=dtype)
return array.view(cls)
@classmethod
def Random(
cls,
shape: Union[int, Sequence[int]] = (),
low: Optional[int] = 0,
high: Optional[int] = None,
seed: Optional[Union[int, np.random.Generator]] = None,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
"""
Creates a Galois field array with random field elements.
Parameters
----------
shape : int, tuple
A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.
A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.
`(M,N)`, represents a 2-D array with each element indicating the size in each dimension.
low : int, optional
The lowest value (inclusive) of a random field element in its integer representation. The default is 0.
high : int, optional
The highest value (exclusive) of a random field element in its integer representation. The default is `None`
which represents the field's order :math:`p^m`.
        seed : int, numpy.random.Generator, optional
            Non-negative integer used to initialize the PRNG. The default is `None` which means that unpredictable
            entropy will be pulled from the OS to be used as the seed. A :obj:`numpy.random.Generator` can also be passed. If so,
            it is used directly when `dtype != np.object_`; otherwise, its state is used to seed `random.seed()`.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field array of random field elements.
Examples
--------
Generate a random matrix with an unpredictable seed.
.. ipython:: python
GF = galois.GF(31)
GF.Random((2,5))
Generate a random array with a specified seed. This produces repeatable outputs.
.. ipython:: python
GF.Random(10, seed=123456789)
GF.Random(10, seed=123456789)
Generate a group of random arrays with one global seed.
.. ipython:: python
rng = np.random.default_rng(123456789)
GF.Random(10, seed=rng)
GF.Random(10, seed=rng)
"""
dtype = cls._get_dtype(dtype)
high = cls.order if high is None else high
if not 0 <= low < high <= cls.order:
raise ValueError(f"Arguments must satisfy `0 <= low < high <= order`, not `0 <= {low} < {high} <= {cls.order}`.")
if seed is not None:
if not isinstance(seed, (int, np.integer, np.random.Generator)):
raise ValueError("Seed must be an integer, a numpy.random.Generator or None.")
if isinstance(seed, (int, np.integer)) and seed < 0:
raise ValueError("Seed must be non-negative.")
if dtype != np.object_:
rng = np.random.default_rng(seed)
array = rng.integers(low, high, shape, dtype=dtype)
else:
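            # Large fields use dtype=object with Python ints (arbitrary precision), which
            # np.random.Generator cannot produce, so fill the array element-by-element with Python's random module.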
array = np.empty(shape, dtype=dtype)
iterator = np.nditer(array, flags=["multi_index", "refs_ok"])
_seed = None
if seed is not None:
if isinstance(seed, np.integer):
                    # random.seed() treats np.integer as an arbitrary hashable object, and seeding
                    # by object hash is deprecated since Python 3.9, so convert to a Python int
_seed = seed.item()
elif isinstance(seed, np.random.Generator):
_seed = seed.bit_generator.state['state']['state']
seed.bit_generator.advance(1)
else: # int
_seed = seed
random.seed(_seed)
for _ in iterator:
array[iterator.multi_index] = random.randint(low, high - 1)
return array.view(cls)
@classmethod
def Elements(
cls,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
r"""
Creates a 1-D Galois field array of the field's elements :math:`\{0, \dots, p^m-1\}`.
Parameters
----------
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A 1-D Galois field array of all the field's elements.
Examples
--------
.. ipython:: python
GF = galois.GF(2**4)
GF.Elements()
As usual, Galois field elements can be displayed in either the "integer" (default), "polynomial", or "power" representation.
This can be changed by calling :func:`galois.FieldClass.display`.
.. ipython:: python
# Permanently set the display mode to "poly"
GF.display("poly");
GF.Elements()
# Temporarily set the display mode to "power"
with GF.display("power"):
print(GF.Elements())
# Reset the display mode to "int"
GF.display();
"""
return cls.Range(0, cls.order, step=1, dtype=dtype)
@classmethod
def Identity(
cls,
size: int,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
r"""
Creates an :math:`n \times n` Galois field identity matrix.
Parameters
----------
size : int
The size :math:`n` along one axis of the matrix. The resulting array has shape `(size, size)`.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field identity matrix of shape `(size, size)`.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
GF.Identity(4)
"""
dtype = cls._get_dtype(dtype)
array = np.identity(size, dtype=dtype)
return array.view(cls)
@classmethod
def Vandermonde(
cls,
a: Union[int, "FieldArray"],
m: int,
n: int,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
r"""
Creates an :math:`m \times n` Vandermonde matrix of :math:`a \in \mathrm{GF}(p^m)`.
Parameters
----------
a : int, galois.FieldArray
An element of :math:`\mathrm{GF}(p^m)`.
m : int
The number of rows in the Vandermonde matrix.
n : int
The number of columns in the Vandermonde matrix.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
The :math:`m \times n` Vandermonde matrix.
Examples
--------
.. ipython:: python
GF = galois.GF(2**3)
a = GF.primitive_element
V = GF.Vandermonde(a, 7, 7)
with GF.display("power"):
print(V)
"""
if not isinstance(a, (int, np.integer, cls)):
raise TypeError(f"Argument `a` must be an integer or element of {cls.name}, not {type(a)}.")
if not isinstance(m, (int, np.integer)):
raise TypeError(f"Argument `m` must be an integer, not {type(m)}.")
if not isinstance(n, (int, np.integer)):
raise TypeError(f"Argument `n` must be an integer, not {type(n)}.")
        if not m > 0:
            raise ValueError(f"Argument `m` must be positive, not {m}.")
        if not n > 0:
            raise ValueError(f"Argument `n` must be positive, not {n}.")
dtype = cls._get_dtype(dtype)
a = cls(a, dtype=dtype)
if not a.ndim == 0:
raise ValueError(f"Argument `a` must be a scalar, not {a.ndim}-D.")
v = a ** np.arange(0, m)
V = np.power.outer(v, np.arange(0, n))
return V
@classmethod
def Vector(
cls,
array: Union[Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
r"""
Creates a Galois field array over :math:`\mathrm{GF}(p^m)` from length-:math:`m` vectors over the prime subfield :math:`\mathrm{GF}(p)`.
This function is the inverse operation of the :func:`vector` method.
Parameters
----------
array : array_like
The input array with field elements in :math:`\mathrm{GF}(p)` to be converted to a Galois field array in :math:`\mathrm{GF}(p^m)`.
The last dimension of the input array must be :math:`m`. An input array with shape `(n1, n2, m)` has output shape `(n1, n2)`. By convention,
the vectors are ordered from highest degree to 0-th degree.
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field array over :math:`\mathrm{GF}(p^m)`.
Examples
--------
.. ipython:: python
GF = galois.GF(2**6)
vec = galois.GF2.Random((3,6)); vec
a = GF.Vector(vec); a
with GF.display("poly"):
print(a)
a.vector()
"""
order = cls.prime_subfield.order
degree = cls.degree
array = cls.prime_subfield(array).view(np.ndarray).astype(cls.dtypes[-1]) # Use the largest dtype so computation doesn't overflow
if not array.shape[-1] == degree:
raise ValueError(f"The last dimension of `array` must be the field extension dimension {cls.degree}, not {array.shape[-1]}.")
degrees = np.arange(degree - 1, -1, -1, dtype=cls.dtypes[-1])
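        # Collapse each length-m vector of GF(p) coefficients (highest degree first) into the
        # integer representation of the corresponding GF(p^m) element, i.e. the sum of c_i * p^i.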
array = np.sum(array * order**degrees, axis=-1)
return cls(array, dtype=dtype)
###############################################################################
# Instance methods
###############################################################################
def additive_order(self) -> Union[np.integer, np.ndarray]:
r"""
Computes the additive order of each element in :math:`x`.
Returns
-------
numpy.integer, numpy.ndarray
An integer array of the additive order of each element in :math:`x`. The return value is a single integer if the
input array :math:`x` is a scalar.
Notes
-----
The additive order :math:`a` of :math:`x` in :math:`\mathrm{GF}(p^m)` is the smallest integer :math:`a`
such that :math:`x a = 0`. With the exception of :math:`0`, the additive order of every element is
the finite field's characteristic.
Examples
--------
Below is the additive order of each element of :math:`\mathrm{GF}(2^4)`.
.. ipython:: python
GF = galois.GF(2**4)
x = GF.Elements(); x
order = x.additive_order(); order
x*order
"""
x = self
field = type(self)
if x.ndim == 0:
order = np.int64(1) if x == 0 else np.int64(field.characteristic)
else:
order = field.characteristic * np.ones(x.shape, dtype=np.int64)
order[np.where(x == 0)] = 1
return order
def multiplicative_order(self) -> Union[np.integer, np.ndarray]:
r"""
Computes the multiplicative order :math:`\textrm{ord}(x)` of each element in :math:`x`.
Returns
-------
numpy.integer, numpy.ndarray
An integer array of the multiplicative order of each element in :math:`x`. The return value is a single integer if the
input array :math:`x` is a scalar.
Notes
-----
The multiplicative order :math:`\textrm{ord}(x) = a` of :math:`x` in :math:`\mathrm{GF}(p^m)` is the smallest power :math:`a`
such that :math:`x^a = 1`. If :math:`a = p^m - 1`, :math:`a` is said to be a generator of the multiplicative group
:math:`\mathrm{GF}(p^m)^\times`.
The multiplicative order of :math:`0` is not defined and will raise an :obj:`ArithmeticError`.
:func:`FieldArray.multiplicative_order` should not be confused with :obj:`FieldClass.order`. The former is a method on a
Galois field array that returns the multiplicative order of elements. The latter is a property of the field, namely
the finite field's order or size.
Examples
--------
Below is the multiplicative order of each non-zero element of :math:`\mathrm{GF}(2^4)`. The elements with
:math:`\textrm{ord}(x) = 15` are multiplicative generators of :math:`\mathrm{GF}(2^4)^\times`.
.. ipython:: python
GF = galois.GF(2**4)
# The multiplicative order of 0 is not defined
x = GF.Range(1, GF.order); x
order = x.multiplicative_order(); order
# Elements with order of 15 are the primitive elements (generators) of the field
GF.primitive_elements
x**order
"""
if not np.count_nonzero(self) == self.size:
raise ArithmeticError("The multiplicative order of 0 is not defined.")
x = self
field = type(self)
if field.ufunc_mode == "jit-lookup":
# This algorithm is faster if np.log() has a lookup table
# β = α^k
# ord(α) = p^m - 1
# ord(β) = (p^m - 1) / gcd(p^m - 1, k)
k = np.log(x) # x as an exponent of α
order = (field.order - 1) // np.gcd(field.order - 1, k)
else:
d = np.array(divisors(field.order - 1)) # Divisors d such that d | p^m - 1
y = np.power.outer(x, d) # x^d -- the first divisor d for which x^d == 1 is the order of x
idxs = np.argmin(np.abs(y.view(np.ndarray) - 1), axis=-1) # First index of divisors, which is the order of x
order = d[idxs] # The order of each element of x
return order
def is_quadratic_residue(self) -> Union[np.bool_, np.ndarray]:
r"""
Determines if the elements of :math:`x` are quadratic residues in the Galois field.
Returns
-------
numpy.bool_, numpy.ndarray
A boolean array indicating whether each element in :math:`x` is a quadratic residue. The return value is a single boolean if the
input array :math:`x` is a scalar.
Notes
-----
An element :math:`x` in :math:`\mathrm{GF}(p^m)` is a *quadratic residue* if there exists a :math:`y` such that
:math:`y^2 = x` in the field.
In fields with characteristic 2, every element is a quadratic residue. In fields with characteristic greater than 2,
exactly half of the nonzero elements are quadratic residues (and they have two unique square roots).
References
----------
* Section 3.5.1 from https://cacr.uwaterloo.ca/hac/about/chap3.pdf.
Examples
--------
.. ipython:: python
GF = galois.GF(11)
x = GF.Elements(); x
x.is_quadratic_residue()
.. ipython:: python
GF = galois.GF(2**4)
x = GF.Elements(); x
x.is_quadratic_residue()
.. ipython:: python
GF = galois.GF(3**3)
x = GF.Elements(); x
x.is_quadratic_residue()
"""
x = self
field = type(self)
if field.characteristic == 2:
# All elements are quadratic residues if the field's characteristic is 2
return np.ones(x.shape, dtype=bool) if x.ndim > 0 else np.bool_(True)
else:
# Compute the Legendre symbol on each element
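# Euler's criterion: x^((q-1)/2) is 1 for nonzero quadratic residues and -1 for non-residues.
# In this library's integer representation, -1 is the element `characteristic - 1`, hence the comparison below.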
return x ** ((field.order - 1)//2) != field.characteristic - 1
def vector(
self,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
r"""
Converts the Galois field array over :math:`\mathrm{GF}(p^m)` to length-:math:`m` vectors over the prime subfield :math:`\mathrm{GF}(p)`.
This function is the inverse operation of the :func:`Vector` constructor. For an array with shape `(n1, n2)`, the output shape
is `(n1, n2, m)`. By convention, the vectors are ordered from highest degree to 0-th degree.
Parameters
----------
dtype : numpy.dtype, optional
The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned
dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.
Returns
-------
galois.FieldArray
A Galois field array of length-:math:`m` vectors over :math:`\mathrm{GF}(p)`.
Examples
--------
.. ipython:: python
GF = galois.GF(2**6)
a = GF.Random(3); a
with GF.display("poly"):
print(a)
vec = a.vector(); vec
GF.Vector(vec)
"""
order = type(self).prime_subfield.order
degree = type(self).degree
array = self.view(np.ndarray)
array = np.repeat(array, degree).reshape(*array.shape, degree)
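# Peel off the base-`order` digits from highest degree to lowest by successive quotients:
# e.g., over GF(2^3) the integer 5 yields the digits [1, 0, 1].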
x = 0
for i in range(degree):
q = (array[...,i] - x) // order**(degree - 1 - i)
array[...,i] = q
x += q*order**(degree - 1 - i)
return type(self).prime_subfield(array, dtype=dtype) # pylint: disable=unexpected-keyword-arg
def row_reduce(
self,
ncols: Optional[int] = None
) -> "FieldArray":
r"""
Performs Gaussian elimination on the matrix to achieve reduced row echelon form.
**Row reduction operations**
1. Swap the position of any two rows.
2. Multiply a row by a non-zero scalar.
3. Add one row to a scalar multiple of another row.
Parameters
----------
ncols : int, optional
The number of columns to perform Gaussian elimination over. The default is `None` which represents
the number of columns of the input array.
Returns
-------
galois.FieldArray
The reduced row echelon form of the input array.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
A = GF.Random((4,4)); A
A.row_reduce()
np.linalg.matrix_rank(A)
One column is a linear combination of another.
.. ipython:: python
GF = galois.GF(31)
A = GF.Random((4,4)); A
A[:,2] = A[:,1] * GF(17); A
A.row_reduce()
np.linalg.matrix_rank(A)
One row is a linear combination of another.
.. ipython:: python
GF = galois.GF(31)
A = GF.Random((4,4)); A
A[3,:] = A[2,:] * GF(8); A
A.row_reduce()
np.linalg.matrix_rank(A)
"""
return row_reduce(self, ncols=ncols)
def lu_decompose(self) -> "FieldArray":
r"""
Decomposes the input array into the product of lower and upper triangular matrices.
Returns
-------
galois.FieldArray
The lower triangular matrix.
galois.FieldArray
The upper triangular matrix.
Examples
--------
.. ipython:: python
GF = galois.GF(5)
# Not every square matrix has an LU decomposition
A = GF([[2, 4, 4, 1], [3, 3, 1, 4], [4, 3, 4, 2], [4, 4, 3, 1]])
L, U = A.lu_decompose()
L
U
# A = L U
np.array_equal(A, L @ U)
"""
return lu_decompose(self)
def lup_decompose(self) -> "FieldArray":
r"""
Decomposes the input array into the product of lower and upper triangular matrices using partial pivoting.
Returns
-------
galois.FieldArray
The lower triangular matrix.
galois.FieldArray
The upper triangular matrix.
galois.FieldArray
The permutation matrix.
Examples
--------
.. ipython:: python
GF = galois.GF(5)
A = GF([[1, 3, 2, 0], [3, 4, 2, 3], [0, 2, 1, 4], [4, 3, 3, 1]])
L, U, P = A.lup_decompose()
L
U
P
# P A = L U
np.array_equal(P @ A, L @ U)
"""
return lup_decompose(self)
def field_trace(self) -> "FieldArray":
r"""
Computes the field trace :math:`\mathrm{Tr}_{L / K}(x)` of the elements of :math:`x`.
Returns
-------
galois.FieldArray
The field trace of :math:`x` in the prime subfield :math:`\mathrm{GF}(p)`.
Notes
-----
The `self` array :math:`x` is over the extension field :math:`L = \mathrm{GF}(p^m)`. The field trace of :math:`x` is
over the subfield :math:`K = \mathrm{GF}(p)`. In other words, :math:`\mathrm{Tr}_{L / K}(x) : L \rightarrow K`.
For finite fields, since :math:`L` is a Galois extension of :math:`K`, the field trace of :math:`x` is defined as a sum
of the Galois conjugates of :math:`x`.
.. math:: \mathrm{Tr}_{L / K}(x) = \sum_{i=0}^{m-1} x^{p^i}
References
----------
* https://en.wikipedia.org/wiki/Field_trace
Examples
--------
The field trace of the elements of :math:`\mathrm{GF}(3^2)` is shown below.
.. ipython:: python
GF = galois.GF(3**2, display="poly")
x = GF.Elements(); x
y = x.field_trace(); y
"""
if not type(self).is_extension_field:
raise TypeError(f"The Galois field must be an extension field to compute the field trace, not {type(self)}.")
field = type(self)
subfield = field.prime_subfield
p = field.characteristic
m = field.degree
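# The trace is the sum of the Galois conjugates x^(p^i) for i = 0, 1, ..., m-1, which always lies in GF(p).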
conjugates = np.power.outer(self, p**np.arange(0, m, dtype=field.dtypes[-1]))
trace = np.add.reduce(conjugates, axis=-1)
return subfield(trace)
def field_norm(self) -> "FieldArray":
r"""
Computes the field norm :math:`\mathrm{N}_{L / K}(x)` of the elements of :math:`x`.
Returns
-------
galois.FieldArray
The field norm of :math:`x` in the prime subfield :math:`\mathrm{GF}(p)`.
Notes
-----
The `self` array :math:`x` is over the extension field :math:`L = \mathrm{GF}(p^m)`. The field norm of :math:`x` is
over the subfield :math:`K = \mathrm{GF}(p)`. In other words, :math:`\mathrm{N}_{L / K}(x) : L \rightarrow K`.
For finite fields, since :math:`L` is a Galois extension of :math:`K`, the field norm of :math:`x` is defined as a product
of the Galois conjugates of :math:`x`.
.. math:: \mathrm{N}_{L / K}(x) = \prod_{i=0}^{m-1} x^{p^i} = x^{(p^m - 1) / (p - 1)}
References
----------
* https://en.wikipedia.org/wiki/Field_norm
Examples
--------
The field norm of the elements of :math:`\mathrm{GF}(3^2)` is shown below.
.. ipython:: python
GF = galois.GF(3**2, display="poly")
x = GF.Elements(); x
y = x.field_norm(); y
"""
if not type(self).is_extension_field:
raise TypeError(f"The Galois field must be an extension field to compute the field norm, not {type(self)}.")
field = type(self)
subfield = field.prime_subfield
p = field.characteristic
m = field.degree
norm = self**((p**m - 1) // (p - 1))
return subfield(norm)
def characteristic_poly(self) -> "Poly":
r"""
Computes the characteristic polynomial of a finite field element :math:`a` or a square matrix :math:`\mathbf{A}`.
This function can be invoked on single finite field elements (scalar 0-D arrays) or square :math:`n \times n`
matrices (2-D arrays).
Returns
-------
Poly
For scalar inputs, the degree-:math:`m` characteristic polynomial :math:`p_a(x)` of :math:`a` over :math:`\mathrm{GF}(p)`.
For square :math:`n \times n` matrix inputs, the degree-:math:`n` characteristic polynomial :math:`p_A(x)` of
:math:`\mathbf{A}` over :math:`\mathrm{GF}(p^m)`.
Notes
-----
An element :math:`a` of :math:`\mathrm{GF}(p^m)` has characteristic polynomial :math:`p_a(x)` over :math:`\mathrm{GF}(p)`.
The characteristic polynomial when evaluated in :math:`\mathrm{GF}(p^m)` annihilates :math:`a`, i.e. :math:`p_a(a) = 0`.
In prime fields :math:`\mathrm{GF}(p)`, the characteristic polynomial of :math:`a` is simply :math:`p_a(x) = x - a`.
An :math:`n \times n` matrix :math:`\mathbf{A}` has characteristic polynomial
:math:`p_A(x) = \textrm{det}(x\mathbf{I} - \mathbf{A})` over :math:`\mathrm{GF}(p^m)`. The constant coefficient of the
characteristic polynomial is :math:`\textrm{det}(-\mathbf{A})`. The :math:`x^{n-1}` coefficient of the characteristic
polynomial is :math:`-\textrm{Tr}(\mathbf{A})`. The characteristic polynomial annihilates :math:`\mathbf{A}`, i.e.
:math:`p_A(\mathbf{A}) = \mathbf{0}`.
References
----------
* https://en.wikipedia.org/wiki/Characteristic_polynomial
Examples
--------
The characteristic polynomial of the element :math:`a`.
.. ipython:: python
GF = galois.GF(3**5)
a = GF.Random(); a
poly = a.characteristic_poly(); poly
# The characteristic polynomial annihilates a
poly(a, field=GF)
The characteristic polynomial of the square matrix :math:`\mathbf{A}`.
.. ipython:: python
GF = galois.GF(3**5)
A = GF.Random((3,3)); A
poly = A.characteristic_poly(); poly
# The x^0 coefficient is det(-A)
poly.coeffs[-1] == np.linalg.det(-A)
# The x^n-1 coefficient is -Tr(A)
poly.coeffs[1] == -np.trace(A)
# The characteristic polynomial annihilates the matrix A
poly(A, elementwise=False)
"""
if self.ndim == 0:
return self._characteristic_poly_element()
elif self.ndim == 2:
return self._characteristic_poly_matrix()
else:
raise ValueError(f"The array must be either 0-D to return the characteristic polynomial of a single element or 2-D to return the characteristic polynomial of a square matrix, not have shape {self.shape}.")
def _characteristic_poly_element(self):
field = type(self)
a = self
x = Poly.Identity(field)
if field.is_prime_field:
return x - a
else:
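# The roots of the characteristic polynomial of `a` over GF(p) are the Galois conjugates a^(p^i) for i = 0, ..., m-1.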
powers = a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1]))
poly = Poly.Roots(powers, field=field)
poly = Poly(poly.coeffs, field=field.prime_subfield)
return poly
def _characteristic_poly_matrix(self):
if not self.shape[0] == self.shape[1]:
raise ValueError(f"The 2-D array must be square to compute its characteristic polynomial, not have shape {self.shape}.")
field = type(self)
A = self
# Compute P = xI - A
P = np.zeros(self.shape, dtype=object)
for i in range(self.shape[0]):
for j in range(self.shape[0]):
if i == j:
P[i,j] = Poly([1, -A[i,j]], field=field)
else:
P[i,j] = Poly([-A[i,j]], field=field)
# Compute det(P)
return self._compute_poly_det(P)
def _compute_poly_det(self, A):
if A.shape == (2,2):
return A[0,0]*A[1,1] - A[0,1]*A[1,0]
field = type(self)
n = A.shape[0] # Size of the nxn matrix
det = Poly.Zero(field)
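# Laplace (cofactor) expansion along the first row: alternately add and subtract A[0,i] times the determinant of the minor.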
for i in range(n):
idxs = np.delete(np.arange(0, n), i)
if i % 2 == 0:
det += A[0,i] * self._compute_poly_det(A[1:,idxs])
else:
det -= A[0,i] * self._compute_poly_det(A[1:,idxs])
return det
def minimal_poly(self) -> "Poly":
r"""
Computes the minimal polynomial of a finite field element :math:`a`.
This function can be invoked only on single finite field elements (scalar 0-D arrays).
Returns
-------
Poly
For scalar inputs, the minimal polynomial :math:`p_a(x)` of :math:`a` over :math:`\mathrm{GF}(p)`.
Notes
-----
An element :math:`a` of :math:`\mathrm{GF}(p^m)` has minimal polynomial :math:`p_a(x)` over :math:`\mathrm{GF}(p)`.
The minimal polynomial when evaluated in :math:`\mathrm{GF}(p^m)` annihilates :math:`a`, i.e. :math:`p_a(a) = 0`.
The minimal polynomial always divides the characteristic polynomial. In prime fields :math:`\mathrm{GF}(p)`, the
minimal polynomial of :math:`a` is simply :math:`p_a(x) = x - a`.
References
----------
* https://en.wikipedia.org/wiki/Minimal_polynomial_(field_theory)
* https://en.wikipedia.org/wiki/Minimal_polynomial_(linear_algebra)
Examples
--------
The characteristic polynomial of the element :math:`a`.
.. ipython:: python
GF = galois.GF(3**5)
a = GF.Random(); a
poly = a.minimal_poly(); poly
# The minimal polynomial annihilates a
poly(a, field=GF)
# The minimal polynomial always divides the characteristic polynomial
a.characteristic_poly() / poly
"""
if self.ndim == 0:
return self._minimal_poly_element()
# elif self.ndim == 2:
# return self._minimal_poly_matrix()
else:
raise ValueError(f"The array must be either 0-D to return the minimal polynomial of a single element or 2-D to return the minimal polynomial of a square matrix, not have shape {self.shape}.")
def _minimal_poly_element(self):
field = type(self)
a = self
x = Poly.Identity(field)
if field.is_prime_field:
return x - a
else:
conjugates = np.unique(a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1])))
poly = Poly.Roots(conjugates, field=field)
poly = Poly(poly.coeffs, field=field.prime_subfield)
return poly
###############################################################################
# Special methods (redefined to add docstrings)
###############################################################################
def __add__(self, other): # pylint: disable=useless-super-delegation
"""
Adds two Galois field arrays element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self + other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5); b
a + b
"""
return super().__add__(other)
def __sub__(self, other): # pylint: disable=useless-super-delegation
"""
Subtracts two Galois field arrays element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self - other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5); b
a - b
"""
return super().__sub__(other)
def __mul__(self, other): # pylint: disable=useless-super-delegation
"""
Multiplies two Galois field arrays element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field.
Warning
-------
When both multiplicands are :obj:`galois.FieldArray`, that indicates a Galois field multiplication. When one
multiplicand is an integer or integer :obj:`numpy.ndarray`, that indicates a scalar multiplication (repeated addition).
Galois field multiplication and scalar multiplication are equivalent in prime fields, but not in extension fields.
Parameters
----------
other : numpy.ndarray, galois.FieldArray
A :obj:`numpy.ndarray` of integers for scalar multiplication or a :obj:`galois.FieldArray` of Galois field elements
for finite field multiplication.
Returns
-------
galois.FieldArray
The Galois field array `self * other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5); b
a * b
When both multiplicands are Galois field elements, that indicates a Galois field multiplication.
.. ipython:: python
GF = galois.GF(2**4, display="poly")
a = GF(7); a
b = GF(2); b
a * b
@suppress
GF.display();
When one multiplicand is an integer, that indicates a scalar multiplication (repeated addition).
.. ipython:: python
a * 2
a + a
"""
return super().__mul__(other)
def __truediv__(self, other): # pylint: disable=useless-super-delegation
"""
Divides two Galois field arrays element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field. In Galois fields, true division and floor division are equivalent.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self / other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5, low=1); b
a / b
"""
return super().__truediv__(other)
def __floordiv__(self, other): # pylint: disable=useless-super-delegation
"""
Divides two Galois field arrays element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field. In Galois fields, true division and floor division are equivalent.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self // other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5, low=1); b
a // b
"""
return super().__floordiv__(other) # pylint: disable=too-many-function-args
def __divmod__(self, other): # pylint: disable=useless-super-delegation
"""
Divides two Galois field arrays element-wise and returns the quotient and remainder.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field. In Galois fields, true division and floor division are equivalent, and the remainder
is always zero.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self // other`.
galois.FieldArray
The Galois field array `self % other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5, low=1); b
q, r = divmod(a, b)
q, r
b*q + r
"""
return super().__divmod__(other)
def __mod__(self, other): # pylint: disable=useless-super-delegation
"""
Divides two Galois field arrays element-wise and returns the remainder.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over
the same Galois field. In Galois fields, true division and floor division are equivalent, and the remainder
is always zero.
Parameters
----------
other : galois.FieldArray
The other Galois field array.
Returns
-------
galois.FieldArray
The Galois field array `self % other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = GF.Random(5, low=1); b
a % b
"""
return super().__mod__(other)
def __pow__(self, other):
"""
Exponentiates a Galois field array element-wise.
`Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. The first array must be a
Galois field array and the second must be an integer or integer array.
Parameters
----------
other : int, numpy.ndarray
The exponent(s) as an integer or integer array.
Returns
-------
galois.FieldArray
The Galois field array `self ** other`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
a = GF.Random((2,5)); a
b = np.random.default_rng().integers(0, 10, 5); b
a ** b
"""
# NOTE: Calling power here instead of `super().__pow__(other)` because when doing so `x ** GF(2)` will invoke `np.square(x)` and not throw
# an error. This way `np.power(x, GF(2))` is called which correctly checks whether the second argument is an integer.
return np.power(self, other)
###############################################################################
# Overridden numpy methods
###############################################################################
def __array_finalize__(self, obj):
"""
A numpy dunder method that is called after "new", "view", or "new from template". It is used here to ensure
that view casting to a Galois field array has the appropriate dtype and that the values are in the field.
"""
if obj is not None and not isinstance(obj, FieldArray):
# Only invoked on view casting
if obj.dtype not in type(self).dtypes:
raise TypeError(f"{type(self).name} can only have integer dtypes {type(self).dtypes}, not {obj.dtype}.")
self._check_array_values(obj)
def __getitem__(self, key):
item = super().__getitem__(key)
if np.isscalar(item):
# Return scalar array elements as 0-dimensional Galois field arrays. This enables Galois field arithmetic
# on scalars, which would otherwise be implemented using standard integer arithmetic.
item = self.__class__(item, dtype=self.dtype)
return item
def __setitem__(self, key, value):
# Verify the values to be written to the Galois field array are in the field
value = self._check_array_like_object(value)
super().__setitem__(key, value)
def __array_function__(self, func, types, args, kwargs):
if func in type(self)._OVERRIDDEN_FUNCTIONS:
output = getattr(type(self), type(self)._OVERRIDDEN_FUNCTIONS[func])(*args, **kwargs)
elif func in type(self)._OVERRIDDEN_LINALG_FUNCTIONS:
output = type(self)._OVERRIDDEN_LINALG_FUNCTIONS[func](*args, **kwargs)
elif func in type(self)._UNSUPPORTED_FUNCTIONS:
raise NotImplementedError(f"The numpy function {func.__name__!r} is not supported on Galois field arrays. If you believe this function should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.\n\nIf you'd like to perform this operation on the data (but not necessarily a Galois field array), you should first call `array = array.view(np.ndarray)` and then call the function.")
else:
if func is np.insert:
args = list(args)
args[2] = self._check_array_like_object(args[2])
args = tuple(args)
output = super().__array_function__(func, types, args, kwargs) # pylint: disable=no-member
if func in type(self)._FUNCTIONS_REQUIRING_VIEW:
output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)
return output
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
meta = {}
meta["types"] = [type(inputs[i]) for i in range(len(inputs))]
meta["operands"] = list(range(len(inputs)))
if method in ["at", "reduceat"]:
# Remove the second argument for "at" ufuncs which is the indices list
meta["operands"].pop(1)
meta["field_operands"] = [i for i in meta["operands"] if isinstance(inputs[i], self.__class__)]
meta["non_field_operands"] = [i for i in meta["operands"] if not isinstance(inputs[i], self.__class__)]
meta["field"] = self.__class__
meta["dtype"] = self.dtype
# meta["ufuncs"] = self._ufuncs
if ufunc in type(self)._OVERRIDDEN_UFUNCS:
# Set all ufuncs with "casting" keyword argument to "unsafe" so we can cast unsigned integers
# to integers. We know this is safe because we already verified the inputs.
if method not in ["reduce", "accumulate", "at", "reduceat"]:
kwargs["casting"] = "unsafe"
# Need to set the intermediate dtype for reduction operations or an error will be thrown. We
# use the largest valid dtype for this field.
if method in ["reduce"]:
kwargs["dtype"] = type(self).dtypes[-1]
return getattr(type(self), type(self)._OVERRIDDEN_UFUNCS[ufunc])(ufunc, method, inputs, kwargs, meta)
elif ufunc in type(self)._UNSUPPORTED_UFUNCS:
raise NotImplementedError(f"The numpy ufunc {ufunc.__name__!r} is not supported on {type(self).name} arrays. If you believe this ufunc should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.")
else:
if ufunc in [np.bitwise_and, np.bitwise_or, np.bitwise_xor] and method not in ["reduce", "accumulate", "at", "reduceat"]:
kwargs["casting"] = "unsafe"
inputs, kwargs = type(self)._view_inputs_as_ndarray(inputs, kwargs)
output = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) # pylint: disable=no-member
if ufunc in type(self)._UFUNCS_REQUIRING_VIEW and output is not None:
output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)
return output
def astype(self, dtype, **kwargs): # pylint: disable=arguments-differ
if dtype not in type(self).dtypes:
raise TypeError(f"{type(self).name} arrays can only be cast as integer dtypes in {type(self).dtypes}, not {dtype}.")
return super().astype(dtype, **kwargs)
def dot(self, b, out=None):
# `np.dot(a, b)` is also available as `a.dot(b)`. Need to override this here for proper results.
return dot(self, b, out=out)
###############################################################################
# Display methods
###############################################################################
def __str__(self):
return self.__repr__()
# formatter = type(self)._formatter(self)
# with np.printoptions(formatter=formatter):
# string = super().__str__()
# return string
def __repr__(self):
formatter = type(self)._formatter(self)
cls = type(self)
class_name = cls.__name__
with np.printoptions(formatter=formatter):
cls.__name__ = "GF" # Rename the class so very large fields don't create large indenting
string = super().__repr__()
cls.__name__ = class_name
# Remove the dtype from the repr and add the Galois field order
dtype_idx = string.find("dtype")
if dtype_idx == -1:
string = string[:-1] + f", {cls._order_str})"
else:
string = string[:dtype_idx] + f"{cls._order_str})"
return string
###############################################################################
# Special GF2 FieldArray subclass
###############################################################################
class GF2Meta(FieldClass, DirMeta):
"""
A metaclass for the GF(2) class.
"""
# pylint: disable=no-value-for-parameter
# Need to have a unique cache of "calculate" functions for GF(2)
_FUNC_CACHE_CALCULATE = {}
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
cls._prime_subfield = cls
cls._is_primitive_poly = True
cls.compile(kwargs["compile"])
@property
def ufunc_modes(cls):
return ["jit-calculate"]
@property
def default_ufunc_mode(cls):
return "jit-calculate"
def _compile_ufuncs(cls):
super()._compile_ufuncs()
assert cls.ufunc_mode == "jit-calculate"
cls._ufuncs["add"] = np.bitwise_xor
cls._ufuncs["negative"] = np.positive
cls._ufuncs["subtract"] = np.bitwise_xor
cls._ufuncs["multiply"] = np.bitwise_and
cls._ufuncs["reciprocal"] = np.positive
cls._ufuncs["divide"] = np.bitwise_and
###############################################################################
# Override ufunc routines to use native numpy bitwise ufuncs for GF(2)
# arithmetic, which is faster than custom ufuncs
###############################################################################
def _ufunc_routine_reciprocal(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
"""
a, b in GF(2)
b = 1 / a; a = 1 is the only element with a multiplicative inverse, and 1 / 1 = 1
= a
"""
cls._verify_unary_method_not_reduction(ufunc, method)
if np.count_nonzero(inputs[0]) != inputs[0].size:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
output = getattr(cls._ufunc("reciprocal"), method)(*inputs, **kwargs)
return output
def _ufunc_routine_divide(cls, ufunc, method, inputs, kwargs, meta):
"""
Need to re-implement this to manually throw ZeroDivisionError if necessary
"""
cls._verify_operands_in_same_field(ufunc, inputs, meta)
if np.count_nonzero(inputs[meta["operands"][-1]]) != inputs[meta["operands"][-1]].size:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
output = getattr(cls._ufunc("divide"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_square(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
"""
a, c in GF(2)
c = a ** 2
= a * a
= a
"""
cls._verify_unary_method_not_reduction(ufunc, method)
return inputs[0]
###############################################################################
# Arithmetic functions using explicit calculation
###############################################################################
@staticmethod
def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
return a ^ b
@staticmethod
def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
"""
Not actually used. `np.positive()` is faster.
"""
return a
@staticmethod
def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
return a ^ b
@staticmethod
def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
"""
Not actually used. `np.bitwise_and()` is faster.
"""
return a & b
@staticmethod
def _reciprocal_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
return 1
@staticmethod
def _divide_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
return a & b
@staticmethod
@numba.extending.register_jitable
def _power_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
if b == 0:
return 1
else:
return a
@staticmethod
@numba.extending.register_jitable
def _log_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0:
raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
if b != 1:
raise ArithmeticError("In GF(2), 1 is the only multiplicative generator.")
return 0
###############################################################################
# Ufuncs written in NumPy operations (not JIT compiled)
###############################################################################
@staticmethod
def _sqrt(a):
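# In GF(2), a^2 = a for both elements, so every element is its own square root.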
return a.copy()
@set_module("galois")
class GF2(FieldArray, metaclass=GF2Meta, characteristic=2, degree=1, order=2, primitive_element=1, compile="jit-calculate"):
r"""
An array over :math:`\mathrm{GF}(2)`.
This class is a pre-generated :obj:`galois.FieldArray` subclass generated with `galois.GF(2)` and is included in the API
for convenience. See :obj:`galois.FieldArray` and :obj:`galois.FieldClass` for more complete documentation and examples.
Examples
--------
This class is equivalent (and, in fact, identical) to the class returned from the Galois field class constructor.
.. ipython:: python
print(galois.GF2)
GF2 = galois.GF(2); print(GF2)
GF2 is galois.GF2
The Galois field properties can be viewed by class attributes, see :obj:`galois.FieldClass`.
.. ipython:: python
# View a summary of the field's properties
print(galois.GF2.properties)
# Or access each attribute individually
galois.GF2.irreducible_poly
galois.GF2.is_prime_field
The class's constructor mimics the call signature of :func:`numpy.array`.
.. ipython:: python
# Construct a Galois field array from an iterable
galois.GF2([1,0,1,1,0,0,0,1])
# Or an iterable of iterables
galois.GF2([[1,0], [1,1]])
# Or a single integer
galois.GF2(1)
"""
###############################################################################
# Polynomials over Galois fields
###############################################################################
# Values were obtained by running scripts/sparse_poly_performance_test.py
SPARSE_VS_BINARY_POLY_FACTOR = 0.00_05
SPARSE_VS_BINARY_POLY_MIN_COEFFS = int(1 / SPARSE_VS_BINARY_POLY_FACTOR)
SPARSE_VS_DENSE_POLY_FACTOR = 0.00_5
SPARSE_VS_DENSE_POLY_MIN_COEFFS = int(1 / SPARSE_VS_DENSE_POLY_FACTOR)
@set_module("galois")
class Poly:
r"""
Create a polynomial :math:`f(x)` over :math:`\mathrm{GF}(p^m)`.
The polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \dots + a_1 x + a_0` has coefficients :math:`\{a_{d}, a_{d-1}, \dots, a_1, a_0\}`
in :math:`\mathrm{GF}(p^m)`.
Parameters
----------
coeffs : tuple, list, numpy.ndarray, galois.FieldArray
The polynomial coefficients :math:`\{a_d, a_{d-1}, \dots, a_1, a_0\}` with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,
:obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over.
* :obj:`None` (default): If the coefficients are a :obj:`galois.FieldArray`, they won't be modified. If the coefficients are not explicitly
in a Galois field, they are assumed to be from :math:`\mathrm{GF}(2)` and are converted using `galois.GF2(coeffs)`.
* :obj:`galois.FieldClass`: The coefficients are explicitly converted to this Galois field `field(coeffs)`.
order : str, optional
The interpretation of the coefficient degrees.
* `"desc"` (default): The first element of `coeffs` is the highest degree coefficient, i.e. :math:`\{a_d, a_{d-1}, \dots, a_1, a_0\}`.
* `"asc"`: The first element of `coeffs` is the lowest degree coefficient, i.e. :math:`\{a_0, a_1, \dots, a_{d-1}, a_d\}`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Examples
--------
Create a polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
galois.Poly([1,0,1,1])
galois.Poly.Degrees([3,1,0])
Create a polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly([124,0,223,0,0,15], field=GF)
# Alternate way of constructing the same polynomial
galois.Poly.Degrees([5,3,0], coeffs=[124,223,15], field=GF)
Polynomial arithmetic using binary operators.
.. ipython:: python
a = galois.Poly([117,0,63,37], field=GF); a
b = galois.Poly([224,0,21], field=GF); b
a + b
a - b
# Compute the quotient of the polynomial division
a / b
# True division and floor division are equivalent
a / b == a // b
# Compute the remainder of the polynomial division
a % b
# Compute both the quotient and remainder in one pass
divmod(a, b)
"""
# pylint: disable=too-many-public-methods
# Increase my array priority so numpy will call my __radd__ instead of its own __add__
__array_priority__ = 100
def __new__(
cls,
coeffs: Union[Tuple[int], List[int], np.ndarray, FieldArray],
field: Optional[FieldClass] = None,
order: Literal["desc", "asc"] = "desc"
) -> "Poly":
if not isinstance(coeffs, (list, tuple, np.ndarray, FieldArray)):
raise TypeError(f"Argument `coeffs` must array-like, not {type(coeffs)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {field}.")
if not isinstance(order, str):
raise TypeError(f"Argument `order` must be a str, not {type(order)}.")
if isinstance(coeffs, (FieldArray, np.ndarray)) and not coeffs.ndim <= 1:
raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
if not order in ["desc", "asc"]:
raise ValueError(f"Argument `order` must be either 'desc' or 'asc', not {order!r}.")
if isinstance(coeffs, (FieldArray, np.ndarray)):
coeffs = np.atleast_1d(coeffs)
if order == "asc":
coeffs = coeffs[::-1] # Ensure it's in descending-degree order
coeffs, field = cls._convert_coeffs(coeffs, field)
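# Dispatch to the most compact representation: very sparse polynomials become SparsePoly; dense GF(2)
# polynomials are packed into a single integer (BinaryPoly); everything else is stored as DensePoly.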
if field is GF2:
if len(coeffs) >= SPARSE_VS_BINARY_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_BINARY_POLY_FACTOR*len(coeffs):
degrees = np.arange(coeffs.size - 1, -1, -1)
return SparsePoly(degrees, coeffs, field=field)
else:
integer = poly_to_integer(coeffs, 2)
return BinaryPoly(integer)
else:
if len(coeffs) >= SPARSE_VS_DENSE_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_DENSE_POLY_FACTOR*len(coeffs):
degrees = np.arange(coeffs.size - 1, -1, -1)
return SparsePoly(degrees, coeffs, field=field)
else:
return DensePoly(coeffs, field=field)
@classmethod
def _convert_coeffs(cls, coeffs, field):
if isinstance(coeffs, FieldArray) and field is None:
# Use the field of the coefficients
field = type(coeffs)
else:
# Convert coefficients to the specified field (or GF2 if unspecified), taking into
# account negative coefficients
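# e.g., with a hypothetical field=GF(7), a coefficient of -1 is mapped to its additive inverse 6.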
field = GF2 if field is None else field
coeffs = np.array(coeffs, dtype=field.dtypes[-1])
idxs = coeffs < 0
coeffs = field(np.abs(coeffs))
coeffs[idxs] *= -1
return coeffs, field
###############################################################################
# Alternate constructors
###############################################################################
@classmethod
def Zero(cls, field: Optional[FieldClass] = GF2) -> "Poly":
r"""
Constructs the polynomial :math:`f(x) = 0` over :math:`\mathrm{GF}(p^m)`.
Parameters
----------
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x) = 0`.
Examples
--------
Construct the zero polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
galois.Poly.Zero()
Construct the zero polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.Zero(field=GF)
"""
return Poly([0], field=field)
@classmethod
def One(cls, field: Optional[FieldClass] = GF2) -> "Poly":
r"""
Constructs the polynomial :math:`f(x) = 1` over :math:`\mathrm{GF}(p^m)`.
Parameters
----------
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x) = 1`.
Examples
--------
Construct the one polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
galois.Poly.One()
Construct the one polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.One(field=GF)
"""
return Poly([1], field=field)
@classmethod
def Identity(cls, field: Optional[FieldClass] = GF2) -> "Poly":
r"""
Constructs the polynomial :math:`f(x) = x` over :math:`\mathrm{GF}(p^m)`.
Parameters
----------
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x) = x`.
Examples
--------
Construct the identity polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
galois.Poly.Identity()
Construct the identity polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.Identity(field=GF)
"""
return Poly([1, 0], field=field)
@classmethod
def Random(
cls,
degree: int,
seed: Optional[Union[int, np.random.Generator]] = None,
field: Optional[FieldClass] = GF2
) -> "Poly":
r"""
Constructs a random polynomial over :math:`\mathrm{GF}(p^m)` with degree :math:`d`.
Parameters
----------
degree : int
The degree of the polynomial.
seed : int, numpy.random.Generator, optional
Non-negative integer used to initialize the PRNG. The default is `None` which means that unpredictable
entropy will be pulled from the OS to be used as the seed. A :obj:`numpy.random.Generator` can also be passed. If so,
it is used directly when `dtype != np.object_`; otherwise, its state is used to seed `random.seed()`.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Examples
--------
Construct a random degree-:math:`5` polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
galois.Poly.Random(5)
Construct a random degree-:math:`5` polynomial over :math:`\mathrm{GF}(2^8)` with a given seed. This produces repeatable results.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.Random(5, seed=123456789, field=GF)
galois.Poly.Random(5, seed=123456789, field=GF)
Construct multiple polynomials with one global seed.
.. ipython:: python
rng = np.random.default_rng(123456789)
galois.Poly.Random(5, seed=rng, field=GF)
galois.Poly.Random(5, seed=rng, field=GF)
"""
if not isinstance(degree, (int, np.integer)):
raise TypeError(f"Argument `degree` must be an integer, not {type(degree)}.")
if seed is not None:
if not isinstance(seed, (int, np.integer, np.random.Generator)):
raise ValueError("Seed must be an integer, a numpy.random.Generator or None.")
if isinstance(seed, (int, np.integer)) and seed < 0:
raise ValueError("Seed must be non-negative.")
if not isinstance(field, FieldClass):
raise TypeError(f"Argument `field` must be a Galois field class, not {type(field)}.")
if not degree >= 0:
raise ValueError(f"Argument `degree` must be non-negative, not {degree}.")
rng = np.random.default_rng(seed) # Make the seed a PRNG object so it can "step" its state if the below "if" statement is invoked
coeffs = field.Random(degree + 1, seed=rng)
if coeffs[0] == 0:
coeffs[0] = field.Random(low=1, seed=rng) # Ensure leading coefficient is non-zero
return Poly(coeffs, field=field)
@classmethod
def Integer(cls, integer: int, field: Optional[FieldClass] = GF2) -> "Poly":
r"""
Constructs a polynomial over :math:`\mathrm{GF}(p^m)` from its integer representation.
Parameters
----------
integer : int
The integer representation of the polynomial :math:`f(x)`.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Notes
-----
The integer value :math:`i` represents the polynomial :math:`f(x) = a_d x^{d} + a_{d-1} x^{d-1} + \dots + a_1 x + a_0`
over the field :math:`\mathrm{GF}(p^m)` if :math:`i = a_{d}(p^m)^{d} + a_{d-1}(p^m)^{d-1} + \dots + a_1(p^m) + a_0` using integer arithmetic,
not finite field arithmetic.
Said differently, if the polynomial coefficients :math:`\{a_d, a_{d-1}, \dots, a_1, a_0\}` are considered as the "digits" of a radix-:math:`p^m`
value, the polynomial's integer representation is the decimal value (radix-:math:`10`).
Examples
--------
Construct a polynomial over :math:`\mathrm{GF}(2)` from its integer representation.
.. ipython:: python
galois.Poly.Integer(5)
Construct a polynomial over :math:`\mathrm{GF}(2^8)` from its integer representation.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.Integer(13*256**3 + 117, field=GF)
"""
if not isinstance(integer, (int, np.integer)):
raise TypeError(f"Argument `integer` be an integer, not {type(integer)}")
if not isinstance(field, FieldClass):
raise TypeError(f"Argument `field` must be a Galois field class, not {type(field)}.")
if not integer >= 0:
raise ValueError(f"Argument `integer` must be non-negative, not {integer}.")
if field is GF2:
# Explicitly create a binary poly
return BinaryPoly(integer)
else:
coeffs = integer_to_poly(integer, field.order)
return Poly(coeffs, field=field)
@classmethod
def String(cls, string: str, field: Optional[FieldClass] = GF2) -> "Poly":
r"""
Constructs a polynomial over :math:`\mathrm{GF}(p^m)` from its string representation.
Parameters
----------
string : str
The string representation of the polynomial :math:`f(x)`.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Notes
-----
The string parsing rules include:
* Either `^` or `**` may be used for indicating the polynomial degrees. For example, `"13x^3 + 117"` or `"13x**3 + 117"`.
* Multiplication operators `*` may be used between coefficients and the polynomial indeterminate `x`, but are not required. For example,
`"13x^3 + 117"` or `"13*x^3 + 117"`.
* Polynomial coefficients of 1 may be specified or omitted. For example, `"x^3 + 117"` or `"1*x^3 + 117"`.
* The polynomial indeterminate can be any single character, but must be consistent. For example, `"13x^3 + 117"` or `"13y^3 + 117"`.
* Spaces are not required between terms. For example, `"13x^3 + 117"` or `"13x^3+117"`.
* Any combination of the above rules is acceptable.
Examples
--------
Construct a polynomial over :math:`\mathrm{GF}(2)` from its string representation.
.. ipython:: python
galois.Poly.String("x^2 + 1")
Construct a polynomial over :math:`\mathrm{GF}(2^8)` from its string representation.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.String("13x^3 + 117", field=GF)
"""
if not isinstance(string, str):
raise TypeError(f"Argument `string` be an string, not {type(string)}")
return Poly.Degrees(*str_to_sparse_poly(string), field=field)
@classmethod
def Degrees(
cls,
degrees: Union[Tuple[int], List[int], np.ndarray],
coeffs: Optional[Union[Tuple[int], List[int], np.ndarray, FieldArray]] = None,
field: Optional[FieldClass] = None
) -> "Poly":
r"""
Constructs a polynomial over :math:`\mathrm{GF}(p^m)` from its non-zero degrees.
Parameters
----------
degrees : tuple, list, numpy.ndarray
The polynomial degrees with non-zero coefficients.
coeffs : tuple, list, numpy.ndarray, galois.FieldArray, optional
The corresponding non-zero polynomial coefficients with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,
:obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument. The
default is `None` which corresponds to all ones.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over.
* :obj:`None` (default): If the coefficients are a :obj:`galois.FieldArray`, they won't be modified. If the coefficients are not explicitly
in a Galois field, they are assumed to be from :math:`\mathrm{GF}(2)` and are converted using `galois.GF2(coeffs)`.
* :obj:`galois.FieldClass`: The coefficients are explicitly converted to this Galois field `field(coeffs)`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Examples
--------
Construct a polynomial over :math:`\mathrm{GF}(2)` by specifying the degrees with non-zero coefficients.
.. ipython:: python
galois.Poly.Degrees([3,1,0])
Construct a polynomial over :math:`\mathrm{GF}(2^8)` by specifying the degrees with non-zero coefficients.
.. ipython:: python
GF = galois.GF(2**8)
galois.Poly.Degrees([3,1,0], coeffs=[251,73,185], field=GF)
"""
if not isinstance(degrees, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `degrees` must array-like, not {type(degrees)}.")
if not isinstance(coeffs, (type(None), list, tuple, np.ndarray, FieldArray)):
raise TypeError(f"Argument `coeffs` must array-like, not {type(coeffs)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {type(field)}.")
degrees = np.array(degrees, dtype=np.int64)
coeffs = [1,]*len(degrees) if coeffs is None else coeffs
coeffs, field = cls._convert_coeffs(coeffs, field)
if not degrees.ndim <= 1:
raise ValueError(f"Argument `degrees` can have dimension at most 1, not {degrees.ndim}.")
if not degrees.size == np.unique(degrees).size:
raise ValueError(f"Argument `degrees` must have unique entries, not {degrees}.")
if not np.all(degrees >= 0):
raise ValueError(f"Argument `degrees` must have non-negative values, not {degrees}.")
if not coeffs.ndim <= 1:
raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
if not degrees.size == coeffs.size:
raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {degrees.size} and {coeffs.size}.")
# No nonzero degrees means it's the zero polynomial
if len(degrees) == 0:
degrees, coeffs = np.array([0]), field([0])
if field is GF2:
if len(degrees) < SPARSE_VS_BINARY_POLY_FACTOR*max(degrees):
# Explicitly create a sparse poly over GF(2)
return SparsePoly(degrees, coeffs=coeffs, field=field)
else:
integer = sparse_poly_to_integer(degrees, coeffs, 2)
return BinaryPoly(integer)
else:
if len(degrees) < SPARSE_VS_DENSE_POLY_FACTOR*max(degrees):
# Explicitly create a sparse poly over GF(p^m)
return SparsePoly(degrees, coeffs=coeffs, field=field)
else:
degree = max(degrees) # The degree of the polynomial
all_coeffs = type(coeffs).Zeros(degree + 1)
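# Descending-degree storage: the coefficient of x^k lives at index (degree - k), so index 0 holds the leading coefficient.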
all_coeffs[degree - degrees] = coeffs
return DensePoly(all_coeffs)
@classmethod
def Roots(
cls,
roots: Union[Tuple[int], List[int], np.ndarray, FieldArray],
multiplicities: Optional[Union[Tuple[int], List[int], np.ndarray]] = None,
field: Optional[FieldClass] = None
) -> "Poly":
r"""
Constructs a monic polynomial over :math:`\mathrm{GF}(p^m)` from its roots.
Parameters
----------
roots : tuple, list, numpy.ndarray, galois.FieldArray
The roots of the desired polynomial with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,
:obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument.
multiplicities : tuple, list, numpy.ndarray, optional
The corresponding root multiplicities. The default is `None` which corresponds to all ones, i.e. `[1,]*len(roots)`.
field : galois.FieldClass, optional
The Galois field :math:`\mathrm{GF}(p^m)` the polynomial is over.
* :obj:`None` (default): If the roots are a :obj:`galois.FieldArray`, they won't be modified. If the roots are not explicitly
in a Galois field, they are assumed to be from :math:`\mathrm{GF}(2)` and are converted using `galois.GF2(roots)`.
* :obj:`galois.FieldClass`: The roots are explicitly converted to this Galois field `field(roots)`.
Returns
-------
galois.Poly
The polynomial :math:`f(x)`.
Notes
-----
The polynomial :math:`f(x)` with :math:`k` roots :math:`\{r_1, r_2, \dots, r_k\}` with multiplicities
:math:`\{m_1, m_2, \dots, m_k\}` is
.. math::
f(x) &= (x - r_1)^{m_1} (x - r_2)^{m_2} \dots (x - r_k)^{m_k}
f(x) &= a_d x^d + a_{d-1} x^{d-1} + \dots + a_1 x + a_0
with degree :math:`d = \sum_{i=1}^{k} m_i`.
Examples
--------
Construct a polynomial over :math:`\mathrm{GF}(2)` from a list of its roots.
.. ipython:: python
roots = [0, 0, 1]
p = galois.Poly.Roots(roots); p
# Evaluate the polynomial at its roots
p(roots)
Construct a polynomial over :math:`\mathrm{GF}(2^8)` from a list of its roots with specific multiplicities.
.. ipython:: python
GF = galois.GF(2**8)
roots = [121, 198, 225]
multiplicities = [1, 2, 1]
p = galois.Poly.Roots(roots, multiplicities=multiplicities, field=GF); p
# Evaluate the polynomial at its roots
p(roots)
"""
multiplicities = [1,]*len(roots) if multiplicities is None else multiplicities
if not isinstance(roots, (tuple, list, np.ndarray, FieldArray)):
raise TypeError(f"Argument `roots` must be array-like, not {type(roots)}.")
if not isinstance(multiplicities, (tuple, list, np.ndarray)):
raise TypeError(f"Argument `multiplicities` must be array-like, not {type(multiplicities)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {field}.")
roots, field = cls._convert_coeffs(roots, field)
roots = field(roots).flatten()
if not len(roots) == len(multiplicities):
raise ValueError(f"Arguments `roots` and `multiplicities` must have the same length, not {len(roots)} and {len(multiplicities)}.")
poly = Poly.One(field=field)
x = Poly.Identity(field=field)
for root, multiplicity in zip(roots, multiplicities):
poly *= (x - root)**multiplicity
return poly
###############################################################################
# Methods
###############################################################################
def coefficients(
self,
size: Optional[int] = None,
order: Literal["desc", "asc"] = "desc"
) -> FieldArray:
"""
Returns the polynomial coefficients in the order and size specified.
Parameters
----------
size : int, optional
The fixed size of the coefficient array. Zeros will be added for higher-order terms. This value must be
at least `degree + 1` or a :obj:`ValueError` will be raised. The default is `None` which corresponds
to `degree + 1`.
order : str, optional
The interpretation of the coefficient degrees.
* `"desc"` (default): The first element returned is the highest degree coefficient.
* `"asc"`: The first element returned is the lowest degree coefficient.
Returns
-------
galois.FieldArray
An array of the polynomial coefficients with length `size`, either in ascending order or descending order.
Notes
-----
This accessor is similar to :obj:`coeffs`, but it has more settings. By default, `Poly.coeffs == Poly.coefficients()`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.coeffs
p.coefficients()
# Return the coefficients in ascending order
p.coefficients(order="asc")
# Return the coefficients in ascending order with size 8
p.coefficients(8, order="asc")
"""
if not isinstance(size, (type(None), int, np.integer)):
raise TypeError(f"Argument `size` must be an integer, not {type(size)}.")
if not isinstance(order, str):
raise TypeError(f"Argument `order` must be a str, not {type(order)}.")
size = len(self) if size is None else size
if not size >= len(self):
raise ValueError(f"Argument `size` must be at least `degree + 1` which is {len(self)}, not {size}.")
if not order in ["desc", "asc"]:
raise ValueError(f"Argument `order` must be either 'desc' or 'asc', not {order!r}.")
coeffs = self.field.Zeros(size)
coeffs[-len(self):] = self.coeffs
if order == "asc":
coeffs = np.flip(coeffs)
return coeffs
def copy(self) -> "Poly":
"""
Deep copies the polynomial.
Returns
-------
galois.Poly
A copy of the original polynomial.
"""
raise NotImplementedError
def reverse(self) -> "Poly":
r"""
Returns the :math:`d`-th reversal :math:`x^d f(\frac{1}{x})` of the polynomial :math:`f(x)` with degree :math:`d`.
Returns
-------
galois.Poly
The :math:`d`-th reversal :math:`x^d f(\frac{1}{x})`.
Notes
-----
For a polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \dots + a_1 x + a_0` with degree :math:`d`, the :math:`d`-th
reversal is equivalent to reversing the coefficients.
.. math::
\textrm{rev}_d f(x) = x^d f(x^{-1}) = a_0 x^d + a_{1} x^{d-1} + \dots + a_{d-1} x + a_d
Examples
--------
.. ipython:: python
GF = galois.GF(7)
f = galois.Poly([5, 0, 3, 4], field=GF); f
f.reverse()
"""
return Poly(self.coeffs[::-1])
def roots(self, multiplicity: bool = False) -> FieldArray:
r"""
Calculates the roots :math:`r` of the polynomial :math:`f(x)`, such that :math:`f(r) = 0`.
Parameters
----------
multiplicity : bool, optional
Optionally return the multiplicity of each root. The default is `False` which only returns the unique
roots.
Returns
-------
galois.FieldArray
Galois field array of roots of :math:`f(x)`. The roots are sorted in increasing order.
np.ndarray
The multiplicity of each root, only returned if `multiplicity=True`.
Notes
-----
This implementation uses Chien's search to find the roots :math:`\{r_1, r_2, \dots, r_k\}` of the degree-:math:`d`
polynomial
.. math::
f(x) = a_{d}x^{d} + a_{d-1}x^{d-1} + \dots + a_1x + a_0,
where :math:`k \le d`. Then, :math:`f(x)` can be factored as
.. math::
f(x) = (x - r_1)^{m_1} (x - r_2)^{m_2} \dots (x - r_k)^{m_k},
where :math:`m_i` is the multiplicity of root :math:`r_i` and :math:`d = \sum_{i=1}^{k} m_i`.
The Galois field elements can be represented as :math:`\mathrm{GF}(p^m) = \{0, 1, \alpha, \alpha^2, \dots, \alpha^{p^m-2}\}`,
where :math:`\alpha` is a primitive element of :math:`\mathrm{GF}(p^m)`.
:math:`0` is a root of :math:`f(x)` if :math:`a_0 = 0`. :math:`1` is a root of :math:`f(x)` if :math:`\sum_{j=0}^{d} a_j = 0`. The
remaining elements of :math:`\mathrm{GF}(p^m)` are powers of :math:`\alpha`. The following equations calculate :math:`f(\alpha^i)`,
where :math:`\alpha^i` is a root of :math:`f(x)` if :math:`f(\alpha^i) = 0`.
.. math::
f(\alpha^i) &= a_{d}(\alpha^i)^{d} + a_{d-1}(\alpha^i)^{d-1} + \dots + a_1(\alpha^i) + a_0
f(\alpha^i) &\overset{\Delta}{=} \lambda_{i,d} + \lambda_{i,d-1} + \dots + \lambda_{i,1} + \lambda_{i,0}
f(\alpha^i) &= \sum_{j=0}^{d} \lambda_{i,j}
The next power of :math:`\alpha` can be easily calculated from the previous calculation.
.. math::
f(\alpha^{i+1}) &= a_{d}(\alpha^{i+1})^{d} + a_{d-1}(\alpha^{i+1})^{d-1} + \dots + a_1(\alpha^{i+1}) + a_0
f(\alpha^{i+1}) &= a_{d}(\alpha^i)^{d}\alpha^d + a_{d-1}(\alpha^i)^{d-1}\alpha^{d-1} + \dots + a_1(\alpha^i)\alpha + a_0
f(\alpha^{i+1}) &= \lambda_{i,d}\alpha^d + \lambda_{i,d-1}\alpha^{d-1} + \dots + \lambda_{i,1}\alpha + \lambda_{i,0}
f(\alpha^{i+1}) &= \sum_{j=0}^{d} \lambda_{i,j}\alpha^j
References
----------
* https://en.wikipedia.org/wiki/Chien_search
Examples
--------
Find the roots of a polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
p = galois.Poly.Roots([0,]*7 + [1,]*13); p
p.roots()
p.roots(multiplicity=True)
Find the roots of a polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
p = galois.Poly.Roots([18,]*7 + [155,]*13 + [227,]*9, field=GF); p
p.roots()
p.roots(multiplicity=True)
"""
if not isinstance(multiplicity, bool):
raise TypeError(f"Argument `multiplicity` must be a bool, not {type(multiplicity)}.")
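# Clarifying note (not in the original source): the Chien search described in the Notes above is
# carried out by the field class's `_poly_roots` routine; only the non-zero terms of f(x) are passed in.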
roots = self.field._poly_roots(self.nonzero_degrees, self.nonzero_coeffs)
if not multiplicity:
return roots
else:
multiplicities = np.array([self._root_multiplicity(root) for root in roots])
return roots, multiplicities
def _root_multiplicity(self, root):
poly = self.copy()
multiplicity = 1
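# Clarifying example (not in the original source): over GF(2), f(x) = (x + 1)^3 = x^3 + x^2 + x + 1 has
# root 1. f'(x) = x^2 + 1 still has root 1 (multiplicity 2), f''(x) = 0, so the loop below factors out
# (x - 1)^2; the quotient x + 1 still has root 1, giving a final multiplicity of 3.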
while True:
# If the root is also a root of the derivative, then it's a multiple root.
poly = poly.derivative()
if poly == 0:
# Cannot test whether p'(root) = 0 because p'(x) = 0. We've exhausted the non-zero derivatives. For
# any Galois field, taking `characteristic` derivatives results in p'(x) = 0. For a root with multiplicity
# greater than the field's characteristic, we need to factor the polynomial. Here we factor out (x - root)^m,
# where m is the current multiplicity.
poly = self.copy() // (Poly([1, -root], field=self.field)**multiplicity)
if poly(root) == 0:
multiplicity += 1
else:
break
return multiplicity
def derivative(self, k: int = 1) -> "Poly":
r"""
Computes the :math:`k`-th formal derivative :math:`\frac{d^k}{dx^k} f(x)` of the polynomial :math:`f(x)`.
Parameters
----------
k : int, optional
The number of derivatives to compute. 1 corresponds to :math:`f'(x)`, 2 corresponds to :math:`f''(x)`, etc.
The default is 1.
Returns
-------
galois.Poly
The :math:`k`-th formal derivative of the polynomial :math:`f(x)`.
Notes
-----
For the polynomial
.. math::
f(x) = a_d x^d + a_{d-1} x^{d-1} + \dots + a_1 x + a_0
the first formal derivative is defined as
.. math::
f'(x) = (d) \cdot a_{d} x^{d-1} + (d-1) \cdot a_{d-1} x^{d-2} + \dots + (2) \cdot a_{2} x + a_1
where :math:`\cdot` represents scalar multiplication (repeated addition), not finite field multiplication.
For example, :math:`3 \cdot a = a + a + a`.
References
----------
* https://en.wikipedia.org/wiki/Formal_derivative
Examples
--------
Compute the derivatives of a polynomial over :math:`\mathrm{GF}(2)`.
.. ipython:: python
p = galois.Poly.Random(7); p
p.derivative()
# k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0
p.derivative(2)
Compute the derivatives of a polynomial over :math:`\mathrm{GF}(7)`.
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly.Random(11, field=GF); p
p.derivative()
p.derivative(2)
p.derivative(3)
# k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0
p.derivative(7)
Compute the derivatives of a polynomial over :math:`\mathrm{GF}(2^8)`.
.. ipython:: python
GF = galois.GF(2**8)
p = galois.Poly.Random(7, field=GF); p
p.derivative()
# k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0
p.derivative(2)
"""
if not isinstance(k, (int, np.integer)):
raise TypeError(f"Argument `k` must be an integer, not {type(k)}.")
if not k > 0:
raise ValueError(f"Argument `k` must be a positive integer, not {k}.")
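# Clarifying note (not in the original source): each coefficient is multiplied by its degree using
# scalar (repeated-addition) multiplication and the degree is lowered by one. For example, over GF(3),
# d/dx (x^3 + 2x^2 + x + 1) = x + 1, since 3 * 1 = 0 and 2 * 2 = 1 in GF(3).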
if 0 in self.nonzero_degrees:
# Cut off the 0th degree
degrees = self.nonzero_degrees[:-1] - 1
coeffs = self.nonzero_coeffs[:-1] * self.nonzero_degrees[:-1] # Scalar multiplication
else:
degrees = self.nonzero_degrees - 1
coeffs = self.nonzero_coeffs * self.nonzero_degrees # Scalar multiplication
p_prime = Poly.Degrees(degrees, coeffs, field=self.field)
k -= 1
if k > 0:
return p_prime.derivative(k)
else:
return p_prime
###############################################################################
# Overridden dunder methods
###############################################################################
def __str__(self):
return f"Poly({self.string}, {self.field.name})"
def __repr__(self):
return str(self)
def __hash__(self):
t = tuple([self.field.order,] + self.nonzero_degrees.tolist() + self.nonzero_coeffs.tolist())
return hash(t)
def __call__(self, x: FieldArray, field: Optional[FieldClass] = None, elementwise: bool = True) -> FieldArray:
"""
Evaluates the polynomial at :math:`x`.
Parameters
----------
x : galois.FieldArray
An array (or 0-D scalar) of field elements to evaluate the polynomial over.
field : galois.FieldClass, optional
The Galois field to evaluate the polynomial over. The default is `None` which represents
the polynomial's current field, i.e. :obj:`field`.
elementwise : bool, optional
Indicates to evaluate arrays elementwise. The default is `True`. If `False`, the polynomial
indeterminate is evaluated at the square matrix :math:`X`.
Returns
-------
galois.FieldArray
The result of the polynomial evaluation of the same shape as :math:`x`.
Examples
--------
.. ipython:: python
GF = galois.GF(2**8)
p = galois.Poly([37, 123, 0, 201], field=GF); p
Evaluate the polynomial elementwise at :math:`x`.
.. ipython:: python
x = GF.Random(4); x
p(x)
GF(37)*x**3 + GF(123)*x**2 + GF(201)
Evaluate the polynomial at the matrix :math:`X`.
.. ipython:: python
X = GF.Random((2,2)); X
p(X, elementwise=False)
GF(37)*np.linalg.matrix_power(X,3) + GF(123)*np.linalg.matrix_power(X,2) + GF(201)*GF.Identity(2)
"""
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {type(field)}.")
field = self.field if field is None else field
coeffs = field(self.coeffs)
x = field(x)
if elementwise:
return field._poly_evaluate(coeffs, x)
else:
if not (x.ndim == 2 and x.shape[0] == x.shape[1]):
raise ValueError(f"Argument `x` must be a square matrix when evaluating the polynomial with `elementwise=False`, not have shape {x.shape}.")
return field._poly_evaluate_matrix(coeffs, x)
def __len__(self) -> int:
"""
Returns the length of the coefficient array.
The length of the coefficient array is `Poly.degree + 1`.
Returns
-------
int
The length of the coefficient array.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
a.coeffs
len(a)
a.degree + 1
"""
return self.degree + 1
def _check_inputs_are_polys(self, a, b):
"""
Verify polynomial arithmetic operands are either galois.Poly or scalars in a finite field.
"""
if not isinstance(a, (Poly, self.field)):
raise TypeError(f"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(a)}.")
if not isinstance(b, (Poly, self.field)):
raise TypeError(f"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(b)}.")
if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:
raise TypeError(f"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.")
def _check_inputs_are_polys_or_ints(self, a, b):
"""
Verify polynomial arithmetic operands are either galois.Poly, scalars in a finite field, or an integer (scalar multiplication).
"""
if not isinstance(a, (Poly, self.field, int, np.integer)):
raise TypeError(f"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(a)}.")
if not isinstance(b, (Poly, self.field, int, np.integer)):
raise TypeError(f"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(b)}.")
if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:
raise TypeError(f"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.")
def _convert_field_scalars_to_polys(self, a, b):
"""
Convert finite field scalars to 0-degree polynomials in that field.
"""
# Promote a single field element to a 0-degree polynomial
if isinstance(a, self.field):
if not a.size == 1:
raise ValueError(f"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {a.size}.")
a = Poly(np.atleast_1d(a))
if isinstance(b, self.field):
if not b.size == 1:
raise ValueError(f"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {b.size}.")
b = Poly(np.atleast_1d(b))
return a, b
@staticmethod
def _determine_poly_class(a, b):
"""
Determine the type of polynomial arithmetic to perform.
"""
if isinstance(a, SparsePoly) or isinstance(b, SparsePoly):
return SparsePoly
elif isinstance(a, BinaryPoly) or isinstance(b, BinaryPoly):
return BinaryPoly
else:
return DensePoly
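# Dispatch note (added for clarity): SparsePoly arithmetic takes precedence above, then the bit-packed
# BinaryPoly representation; mixing a BinaryPoly with a SparsePoly therefore uses sparse arithmetic,
# while two BinaryPoly operands stay in the GF(2) integer representation.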
def __add__(self, other):
"""
Adds two polynomials.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The polynomial :math:`c(x) = a(x) + b(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
a + b
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._add(a, b)
def __radd__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._add(b, a)
def __sub__(self, other):
"""
Subtracts two polynomials.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The polynomial :math:`c(x) = a(x) - b(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
a - b
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._sub(a, b)
def __rsub__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._sub(b, a)
def __mul__(self, other):
"""
Multiplies two polynomials.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The polynomial :math:`c(x) = a(x) b(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
a * b
"""
self._check_inputs_are_polys_or_ints(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
if isinstance(a, (int, np.integer)):
# Ensure the integer is in the second operand for scalar multiplication
a, b = b, a
cls = self._determine_poly_class(a, b)
return cls._mul(a, b)
def __rmul__(self, other):
self._check_inputs_are_polys_or_ints(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
if isinstance(b, (int, np.integer)):
# Ensure the integer is in the second operand for scalar multiplication
b, a = a, b
cls = self._determine_poly_class(a, b)
return cls._mul(b, a)
def __divmod__(self, other):
"""
Divides two polynomials and returns the quotient and remainder.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.
galois.Poly
The remainder polynomial :math:`r(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
q, r = divmod(a, b)
q, r
b*q + r
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)
def __rdivmod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)
def __truediv__(self, other):
"""
Divides two polynomials and returns the quotient.
True division and floor division are equivalent.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
divmod(a, b)
a / b
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)[0]
def __rtruediv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)[0]
def __floordiv__(self, other):
"""
Divides two polynomials and returns the quotient.
True division and floor division are equivalent.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
divmod(a, b)
a // b
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)[0]
def __rfloordiv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)[0]
def __mod__(self, other):
"""
Divides two polynomials and returns the remainder.
Parameters
----------
other : galois.Poly
The polynomial :math:`b(x)`.
Returns
-------
galois.Poly
The remainder polynomial :math:`r(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
b = galois.Poly.Random(3); b
divmod(a, b)
a % b
"""
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._mod(a, b)
def __rmod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._mod(b, a)
def __pow__(self, other):
"""
Exponentiates the polynomial to an integer power.
Parameters
----------
other : int
The non-negative integer exponent.
Returns
-------
galois.Poly
The polynomial :math:`a(x)^b`.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
a**3
a * a * a
"""
if not isinstance(other, (int, np.integer)):
raise TypeError(f"For polynomial exponentiation, the second argument must be an int, not {other}.")
if not other >= 0:
raise ValueError(f"Can only exponentiate polynomials to non-negative integers, not {other}.")
a, power = self, other
field = self.field
# c(x) = a(x) ** power
if power == 0:
return Poly.One(field)
c_square = a # The "squaring" part
c_mult = Poly.One(field) # The "multiplicative" part
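# Loop invariant (comment added for clarity): a(x)**other == c_mult(x) * c_square(x)**power
# before and after every iteration of the square-and-multiply loop below.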
while power > 1:
if power % 2 == 0:
c_square *= c_square
power //= 2
else:
c_mult *= c_square
power -= 1
c = c_mult * c_square
return c
def __neg__(self):
raise NotImplementedError
def __eq__(self, other):
if isinstance(other, (int, np.integer)):
# Compare poly to an integer scalar (assumed to be from the same field)
return self.degree == 0 and np.array_equal(self.coeffs, [other])
elif isinstance(other, FieldArray):
# Compare poly to a finite field scalar (may or may not be from the same field)
if not other.ndim == 0:
raise ValueError(f"Can only compare galois.Poly to a 0-D galois.FieldArray scalar, not shape {other.shape}.")
return self.field is type(other) and self.degree == 0 and np.array_equal(self.coeffs, np.atleast_1d(other))
elif not isinstance(other, Poly):
raise TypeError(f"Can only compare galois.Poly and galois.Poly / int / galois.FieldArray scalar objects, not {type(other)}.")
else:
# Compare two poly objects to each other
return self.field is other.field and np.array_equal(self.nonzero_degrees, other.nonzero_degrees) and np.array_equal(self.nonzero_coeffs, other.nonzero_coeffs)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _add(cls, a, b):
raise NotImplementedError
@classmethod
def _sub(cls, a, b):
raise NotImplementedError
@classmethod
def _mul(cls, a, b):
raise NotImplementedError
@classmethod
def _divmod(cls, a, b):
raise NotImplementedError
@classmethod
def _mod(cls, a, b):
raise NotImplementedError
###############################################################################
# Instance properties
###############################################################################
@property
def field(self) -> FieldClass:
"""
galois.FieldClass: The Galois field array class to which the coefficients belong.
Examples
--------
.. ipython:: python
a = galois.Poly.Random(5); a
a.field
.. ipython:: python
GF = galois.GF(2**8)
b = galois.Poly.Random(5, field=GF); b
b.field
"""
raise NotImplementedError
@property
def degree(self) -> int:
"""
int: The degree of the polynomial, i.e. the highest degree with non-zero coefficient.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.degree
"""
raise NotImplementedError
@property
def nonzero_degrees(self) -> np.ndarray:
"""
numpy.ndarray: An array of the polynomial degrees that have non-zero coefficients, in degree-descending order. The entries of
:obj:`nonzero_degrees` are paired with :obj:`nonzero_coeffs`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.nonzero_degrees
"""
raise NotImplementedError
@property
def nonzero_coeffs(self) -> FieldArray:
"""
galois.FieldArray: The non-zero coefficients of the polynomial in degree-descending order. The entries of :obj:`nonzero_degrees`
are paired with :obj:`nonzero_coeffs`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.nonzero_coeffs
"""
raise NotImplementedError
@property
def degrees(self) -> np.ndarray:
"""
numpy.ndarray: An array of the polynomial degrees in degree-descending order. The entries of :obj:`degrees`
are paired with :obj:`coeffs`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.degrees
"""
raise NotImplementedError
@property
def coeffs(self) -> FieldArray:
"""
galois.FieldArray: The coefficients of the polynomial in degree-descending order. The entries of :obj:`degrees` are
paired with :obj:`coeffs`.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.coeffs
"""
raise NotImplementedError
@property
def integer(self) -> int:
r"""
int: The integer representation of the polynomial. For the polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \dots + a_1 x + a_0`
over the field :math:`\mathrm{GF}(p^m)`, the integer representation is :math:`i = a_d (p^m)^{d} + a_{d-1} (p^m)^{d-1} + \dots + a_1 (p^m) + a_0`
using integer arithmetic, not finite field arithmetic.
Said differently, if the polynomial coefficients :math:`\{a_d, a_{d-1}, \dots, a_1, a_0\}` are considered as the "digits" of a radix-:math:`p^m`
value, the polynomial's integer representation is the decimal value (radix-:math:`10`).
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.integer
p.integer == 3*7**3 + 5*7**1 + 2*7**0
"""
return sparse_poly_to_integer(self.nonzero_degrees, self.nonzero_coeffs, self.field.order)
@property
def string(self) -> str:
"""
str: The string representation of the polynomial, without specifying the Galois field.
Examples
--------
.. ipython:: python
GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF); p
p.string
"""
return sparse_poly_to_str(self.nonzero_degrees, self.nonzero_coeffs)
class DensePoly(Poly):
"""
Implementation of dense polynomials over Galois fields.
"""
__slots__ = ["_coeffs"]
def __new__(cls, coeffs, field=None): # pylint: disable=signature-differs
# Arguments aren't verified in Poly.__new__()
obj = object.__new__(cls)
obj._coeffs = coeffs
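# Normalization note (added for clarity): leading zero coefficients are stripped so the stored array
# always begins with the highest-degree non-zero term, e.g. (illustrative) GF(7)([0, 0, 3, 5]) is
# stored as GF(7)([3, 5]); an all-zero input collapses to the single coefficient [0].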
if obj._coeffs.size > 1:
# Remove leading zero coefficients
idxs = np.nonzero(obj._coeffs)[0]
if idxs.size > 0:
obj._coeffs = obj._coeffs[idxs[0]:]
else:
obj._coeffs = obj._coeffs[-1]
# Ensure the coefficient array isn't 0-dimensional
obj._coeffs = np.atleast_1d(obj._coeffs)
return obj
###############################################################################
# Methods
###############################################################################
def copy(self):
return DensePoly(self._coeffs.copy())
###############################################################################
# Arithmetic methods
###############################################################################
def __neg__(self):
return DensePoly(-self._coeffs)
@classmethod
def _add(cls, a, b):
field = a.field
# c(x) = a(x) + b(x)
c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))
c_coeffs[-a.coeffs.size:] = a.coeffs
c_coeffs[-b.coeffs.size:] += b.coeffs
return Poly(c_coeffs)
@classmethod
def _sub(cls, a, b):
field = a.field
# c(x) = a(x) - b(x)
c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))
c_coeffs[-a.coeffs.size:] = a.coeffs
c_coeffs[-b.coeffs.size:] -= b.coeffs
return Poly(c_coeffs)
@classmethod
def _mul(cls, a, b):
if isinstance(b, (int, np.integer)):
# Scalar multiplication (p * 3 = p + p + p)
c_coeffs = a.coeffs * b
else:
# c(x) = a(x) * b(x)
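# Clarifying note (not in the original source): convolving the two coefficient arrays produces the
# product's coefficients, with the multiplications and additions performed in the field.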
c_coeffs = np.convolve(a.coeffs, b.coeffs)
return Poly(c_coeffs)
@classmethod
def _divmod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
# q(x)*b(x) + r(x) = a(x)
if b.degree == 0:
return Poly(a.coeffs // b.coeffs), zero
elif a == 0:
return zero, zero
elif a.degree < b.degree:
return zero, a.copy()
else:
q_coeffs, r_coeffs = field._poly_divmod(a.coeffs, b.coeffs)
return Poly(q_coeffs), Poly(r_coeffs)
@classmethod
def _mod(cls, a, b):
return cls._divmod(a, b)[1]
###############################################################################
# Instance properties
###############################################################################
@property
def field(self):
return type(self._coeffs)
@property
def degree(self):
return self._coeffs.size - 1
@property
def nonzero_degrees(self):
return self.degree - np.nonzero(self._coeffs)[0]
@property
def nonzero_coeffs(self):
return self._coeffs[np.nonzero(self._coeffs)[0]]
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
return self._coeffs.copy()
class BinaryPoly(Poly):
"""
Implementation of polynomials over GF(2).
"""
__slots__ = ["_integer", "_coeffs"]
def __new__(cls, integer): # pylint: disable=signature-differs
if not isinstance(integer, (int, np.integer)):
raise TypeError(f"Argument `integer` must be an integer, not {type(integer)}.")
if not integer >= 0:
raise ValueError(f"Argument `integer` must be non-negative, not {integer}.")
obj = object.__new__(cls)
obj._integer = integer
obj._coeffs = None # Only compute these if requested
return obj
###############################################################################
# Methods
###############################################################################
def copy(self):
return BinaryPoly(self._integer)
###############################################################################
# Arithmetic methods
###############################################################################
def __neg__(self):
return self.copy()
@classmethod
def _add(cls, a, b):
return BinaryPoly(a.integer ^ b.integer)
@classmethod
def _sub(cls, a, b):
return BinaryPoly(a.integer ^ b.integer)
@classmethod
def _mul(cls, a, b):
if isinstance(b, (int, np.integer)):
# Scalar multiplication (p * 3 = p + p + p)
return BinaryPoly(a.integer) if b % 2 == 1 else BinaryPoly(0)
else:
# Re-order operands such that a > b so the while loop runs fewer iterations
a = a.integer
b = b.integer
if b > a:
a, b = b, a
c = 0
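# Shift-and-XOR (carry-less) multiplication. Illustrative example (not from the original source):
# (x^2 + 1)(x + 1) -> a = 0b101, b = 0b11, c = 0b101 ^ 0b1010 = 0b1111 = x^3 + x^2 + x + 1.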
while b > 0:
if b & 0b1:
c ^= a # Add a(x) to c(x)
b >>= 1 # Divide b(x) by x
a <<= 1 # Multiply a(x) by x
return BinaryPoly(c)
@classmethod
def _divmod(cls, a, b):
deg_a = a.degree
deg_q = a.degree - b.degree
deg_r = b.degree - 1
a = a.integer
b = b.integer
q = 0
mask = 1 << deg_a
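# Bitwise long division (clarifying note, not in the original source): each step aligns b(x) under the
# current leading bit of a(x), XORs it away, and records the corresponding quotient bit. For example,
# (x^3 + x^2 + x + 1) / (x + 1) yields q = 0b101 = x^2 + 1 and r = 0.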
for i in range(deg_q, -1, -1):
q <<= 1
if a & mask:
a ^= b << i
q ^= 1 # Set the LSB of the quotient; it gets left-shifted at the start of the next iteration
assert a & mask == 0
mask >>= 1
# The remainder is what is left in the low bits of a(x)
mask = (1 << (deg_r + 1)) - 1 # The last deg_r + 1 bits of a
r = a & mask
return BinaryPoly(q), BinaryPoly(r)
@classmethod
def _mod(cls, a, b):
return cls._divmod(a, b)[1]
###############################################################################
# Instance properties
###############################################################################
@property
def field(self):
return GF2
@property
def degree(self):
if self._integer == 0:
return 0
else:
return len(bin(self._integer)[2:]) - 1
@property
def nonzero_degrees(self):
return self.degree - np.nonzero(self.coeffs)[0]
@property
def nonzero_coeffs(self):
return self.coeffs[np.nonzero(self.coeffs)[0]]
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
if self._coeffs is None:
binstr = bin(self._integer)[2:]
self._coeffs = GF2([int(b) for b in binstr])
return self._coeffs.copy()
@property
def integer(self):
return self._integer
class SparsePoly(Poly):
"""
Implementation of sparse polynomials over Galois fields.
"""
__slots__ = ["_degrees", "_coeffs"]
def __new__(cls, degrees, coeffs=None, field=None): # pylint: disable=signature-differs
coeffs = [1,]*len(degrees) if coeffs is None else coeffs
if not isinstance(degrees, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `degrees` must be array-like, not {type(degrees)}.")
if not isinstance(coeffs, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `coeffs` must be array-like, not {type(coeffs)}.")
if not len(degrees) == len(coeffs):
raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {len(degrees)} and {len(coeffs)}.")
if not all(degree >= 0 for degree in degrees):
raise ValueError(f"Argument `degrees` must have non-negative values, not {degrees}.")
obj = object.__new__(cls)
if isinstance(coeffs, FieldArray) and field is None:
obj._degrees = np.array(degrees)
obj._coeffs = coeffs
else:
field = GF2 if field is None else field
if isinstance(coeffs, np.ndarray):
# Ensure coeffs is an iterable
coeffs = coeffs.tolist()
obj._degrees = np.array(degrees)
obj._coeffs = field([-field(abs(c)) if c < 0 else field(c) for c in coeffs])
# Sort the degrees and coefficients in descending order
idxs = np.argsort(degrees)[::-1]
obj._degrees = obj._degrees[idxs]
obj._coeffs = obj._coeffs[idxs]
# Remove zero coefficients
idxs = np.nonzero(obj._coeffs)[0]
obj._degrees = obj._degrees[idxs]
obj._coeffs = obj._coeffs[idxs]
return obj
###############################################################################
# Methods
###############################################################################
def copy(self):
return SparsePoly(self.degrees, self.coeffs)
def reverse(self):
return SparsePoly(self.degree - self.degrees, self.coeffs)
###############################################################################
# Arithmetic methods
###############################################################################
def __neg__(self):
return SparsePoly(self._degrees, -self._coeffs)
@classmethod
def _add(cls, a, b):
field = a.field
# c(x) = a(x) + b(x)
cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[b_degree] = cc.get(b_degree, field(0)) + b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _sub(cls, a, b):
field = a.field
# c(x) = a(x) - b(x)
cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[b_degree] = cc.get(b_degree, field(0)) - b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _mul(cls, a, b):
field = a.field
if isinstance(b, (int, np.integer)):
# Scalar multiplication (p * 3 = p + p + p)
return Poly.Degrees(a.nonzero_degrees, a.nonzero_coeffs * b)
else:
# c(x) = a(x) * b(x)
cc = {}
for a_degree, a_coeff in zip(a.nonzero_degrees, a.nonzero_coeffs):
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[a_degree + b_degree] = cc.get(a_degree + b_degree, field(0)) + a_coeff*b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _divmod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
# q(x)*b(x) + r(x) = a(x)
if b.degree == 0:
q_degrees = a.nonzero_degrees
q_coeffs = [a_coeff // b.coeffs[0] for a_coeff in a.nonzero_coeffs]
return Poly.Degrees(q_degrees, q_coeffs, field=field), zero
elif a == 0:
return zero, zero
elif a.degree < b.degree:
return zero, a.copy()
else:
aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
b_coeffs = b.coeffs
q_degree = a.degree - b.degree
r_degree = b.degree # One more than the degree of the final remainder
qq = {}
r_coeffs = field.Zeros(r_degree + 1)
# Preset remainder so we can rotate at the start of loop
for i in range(0, b.degree):
r_coeffs[1 + i] = aa.get(a.degree - i, 0)
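# Clarifying note (not in the original source): r_coeffs acts as a sliding window over the dividend.
# Each iteration below rotates in the next coefficient of a(x); if the leading entry is non-zero it is
# cancelled with a multiple of b(x), and that multiple becomes the next quotient coefficient.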
for i in range(0, q_degree + 1):
r_coeffs = np.roll(r_coeffs, -1)
r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)
if r_coeffs[0] > 0:
q = r_coeffs[0] // b_coeffs[0]
r_coeffs -= q*b_coeffs
qq[q_degree - i] = q
return Poly.Degrees(list(qq.keys()), list(qq.values()), field=field), Poly(r_coeffs[1:])
@classmethod
def _mod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
# q(x)*b(x) + r(x) = a(x)
if b.degree == 0:
return zero
elif a == 0:
return zero
elif a.degree < b.degree:
return a.copy()
else:
aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
b_coeffs = b.coeffs
q_degree = a.degree - b.degree
r_degree = b.degree # One more than the degree of the final remainder
r_coeffs = field.Zeros(r_degree + 1)
# Preset remainder so we can rotate at the start of loop
for i in range(0, b.degree):
r_coeffs[1 + i] = aa.get(a.degree - i, 0)
for i in range(0, q_degree + 1):
r_coeffs = np.roll(r_coeffs, -1)
r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)
if r_coeffs[0] > 0:
q = r_coeffs[0] // b_coeffs[0]
r_coeffs -= q*b_coeffs
return Poly(r_coeffs[1:])
###############################################################################
# Instance properties
###############################################################################
@property
def field(self):
return type(self._coeffs)
@property
def degree(self):
return 0 if self._degrees.size == 0 else int(np.max(self._degrees))
@property
def nonzero_degrees(self):
return self._degrees.copy()
@property
def nonzero_coeffs(self):
return self._coeffs.copy()
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
# Assemble a full list of coefficients, including zeros
coeffs = self.field.Zeros(self.degree + 1)
if self.nonzero_degrees.size > 0:
coeffs[self.degree - self.nonzero_degrees] = self.nonzero_coeffs
return coeffs
# Define the GF(2) primitive polynomial here, not in _fields/_gf2.py, to avoid a circular dependency with `Poly`.
# The primitive polynomial is p(x) = x - alpha, where alpha = 1. Over GF(2), this is equivalent
# to p(x) = x + 1.
GF2._irreducible_poly = Poly([1, 1]) # pylint: disable=protected-access
| 36.845719 | 428 | 0.55853 | import inspect
import math
import random
from typing import Tuple, List, Sequence, Iterable, Optional, Union
from typing_extensions import Literal
import numba
import numpy as np
from .._factor import divisors
from .._overrides import set_module
from .._poly_conversion import integer_to_poly, poly_to_integer, str_to_integer, poly_to_str, sparse_poly_to_integer, sparse_poly_to_str, str_to_sparse_poly
from ._dtypes import DTYPES
from ._linalg import dot, row_reduce, lu_decompose, lup_decompose
from ._functions import FunctionMeta
from ._ufuncs import UfuncMeta
__all__ = ["FieldClass", "FieldArray", "GF2", "Poly"]
@set_module("galois")
class FieldClass(FunctionMeta, UfuncMeta):
def __new__(cls, name, bases, namespace, **kwargs): return super().__new__(cls, name, bases, namespace)
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
cls._characteristic = kwargs.get("characteristic", 0)
cls._degree = kwargs.get("degree", 0)
cls._order = kwargs.get("order", 0)
cls._order_str = None
cls._ufunc_mode = None
cls._ufunc_target = None
cls._dtypes = cls._determine_dtypes()
if "irreducible_poly" in kwargs:
cls._irreducible_poly = kwargs["irreducible_poly"]
cls._irreducible_poly_int = cls._irreducible_poly.integer
else:
cls._irreducible_poly = None
cls._irreducible_poly_int = 0
cls._primitive_element = kwargs.get("primitive_element", None)
cls._is_primitive_poly = kwargs.get("is_primitive_poly", None)
cls._prime_subfield = None
cls._display_mode = "int"
if cls.degree == 1:
cls._order_str = f"order={cls.order}"
else:
cls._order_str = f"order={cls.characteristic}^{cls.degree}"
def __str__(cls):
return f"<class 'numpy.ndarray over {cls.name}'>"
def __repr__(cls):
return str(cls)
def _determine_dtypes(cls):
dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= cls.order - 1]
if len(dtypes) == 0:
dtypes = [np.object_]
return dtypes
def compile(cls, mode: str):
if not isinstance(mode, (type(None), str)):
raise TypeError(f"Argument `mode` must be a string, not {type(mode)}.")
mode = cls.default_ufunc_mode if mode == "auto" else mode
if mode not in cls.ufunc_modes:
raise ValueError(f"Argument `mode` must be in {cls.ufunc_modes} for {cls.name}, not {mode!r}.")
if mode == cls.ufunc_mode:
return
cls._ufunc_mode = mode
cls._compile_ufuncs()
def display(
cls,
mode: Literal["int", "poly", "power"] = "int"
) -> "DisplayContext":
if not isinstance(mode, (type(None), str)):
raise TypeError(f"Argument `mode` must be a string, not {type(mode)}.")
if mode not in ["int", "poly", "power"]:
raise ValueError(f"Argument `mode` must be in ['int', 'poly', 'power'], not {mode!r}.")
context = DisplayContext(cls)
cls._display_mode = mode # Set the new state
return context
def repr_table(
cls,
primitive_element: Optional[Union[int, str, np.ndarray, "FieldArray"]] = None,
sort: Literal["power", "poly", "vector", "int"] = "power"
) -> str:
if sort not in ["power", "poly", "vector", "int"]:
raise ValueError(f"Argument `sort` must be in ['power', 'poly', 'vector', 'int'], not {sort!r}.")
if primitive_element is None:
primitive_element = cls.primitive_element
degrees = np.arange(0, cls.order - 1)
x = primitive_element**degrees
if sort != "power":
idxs = np.argsort(x)
degrees, x = degrees[idxs], x[idxs]
x = np.concatenate((np.atleast_1d(cls(0)), x)) # Add 0 = alpha**-Inf
prim = poly_to_str(integer_to_poly(primitive_element, cls.characteristic))
# Define print helper functions
if len(prim) > 1:
print_power = lambda power: "0" if power is None else f"({prim})^{power}"
else:
print_power = lambda power: "0" if power is None else f"{prim}^{power}"
print_poly = lambda x: poly_to_str(integer_to_poly(x, cls.characteristic))
print_vec = lambda x: str(integer_to_poly(x, cls.characteristic, degree=cls.degree-1))
print_int = lambda x: str(int(x))
# Determine column widths
N_power = max([len(print_power(max(degrees))), len("Power")]) + 2
N_poly = max([len(print_poly(e)) for e in x] + [len("Polynomial")]) + 2
N_vec = max([len(print_vec(e)) for e in x] + [len("Vector")]) + 2
N_int = max([len(print_int(e)) for e in x] + [len("Integer")]) + 2
# Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472
string = "╔" + "═"*N_power + "╤" + "═"*N_poly + "╤" + "═"*N_vec + "╤" + "═"*N_int + "╗"
string += "\n║" + "Power".center(N_power) + "│" + "Polynomial".center(N_poly) + "│" + "Vector".center(N_vec) + "│" + "Integer".center(N_int) + "║"
string += "\n║" + "═"*N_power + "╪" + "═"*N_poly + "╪" + "═"*N_vec + "╪" + "═"*N_int + "║"
for i in range(x.size):
d = None if i == 0 else degrees[i - 1]
string += "\n║" + print_power(d).center(N_power) + "│" + poly_to_str(integer_to_poly(x[i], cls.characteristic)).center(N_poly) + "│" + str(integer_to_poly(x[i], cls.characteristic, degree=cls.degree-1)).center(N_vec) + "│" + cls._print_int(x[i]).center(N_int) + "║"
if i < x.size - 1:
string += "\n╟" + "─"*N_power + "┼" + "─"*N_poly + "┼" + "─"*N_vec + "┼" + "─"*N_int + "╢"
string += "\n╚" + "═"*N_power + "╧" + "═"*N_poly + "╧"+ "═"*N_vec + "╧" + "═"*N_int + "╝"
return string
def arithmetic_table(
cls,
operation: Literal["+", "-", "*", "/"],
x: Optional["FieldArray"] = None,
y: Optional["FieldArray"] = None
) -> str:
if not operation in ["+", "-", "*", "/"]:
raise ValueError(f"Argument `operation` must be in ['+', '-', '*', '/'], not {operation!r}.")
if cls.display_mode == "power":
# Order elements by powers of the primitive element
x_default = np.concatenate((np.atleast_1d(cls(0)), cls.primitive_element**np.arange(0, cls.order - 1, dtype=cls.dtypes[-1])))
else:
x_default = cls.Elements()
y_default = x_default if operation != "/" else x_default[1:]
x = x_default if x is None else cls(x)
y = y_default if y is None else cls(y)
X, Y = np.meshgrid(x, y, indexing="ij")
if operation == "+":
Z = X + Y
elif operation == "-":
Z = X - Y
elif operation == "*":
Z = X * Y
else:
Z = X / Y
if cls.display_mode == "int":
print_element = cls._print_int
elif cls.display_mode == "poly":
print_element = cls._print_poly
else:
cls._set_print_power_vars(x)
print_element = cls._print_power
operation_str = f"x {operation} y"
N = max([len(print_element(e)) for e in x]) + 2
N_left = max(N, len(operation_str) + 2)
# Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472
string = "╔" + "═"*N_left + "╦" + ("═"*N + "╤")*(y.size - 1) + "═"*N + "╗"
string += "\n║" + operation_str.rjust(N_left - 1) + " ║"
for j in range(y.size):
string += print_element(y[j]).center(N)
string += "│" if j < y.size - 1 else "║"
string += "\n╠" + "═"*N_left + "╬" + ("═"*N + "╪")*(y.size - 1) + "═"*N + "╣"
for i in range(x.size):
string += "\n║" + print_element(x[i]).rjust(N_left - 1) + " ║"
for j in range(y.size):
string += print_element(Z[i,j]).center(N)
string += "│" if j < y.size - 1 else "║"
if i < x.size - 1:
string += "\n╟" + "─"*N_left + "╫" + ("─"*N + "┼")*(y.size - 1) + "─"*N + "╢"
string += "\n╚" + "═"*N_left + "╩" + ("═"*N + "╧")*(y.size - 1) + "═"*N + "╝"
return string
###############################################################################
# Array display methods
###############################################################################
def _formatter(cls, array):
# pylint: disable=attribute-defined-outside-init
formatter = {}
if cls.display_mode == "poly":
formatter["int"] = cls._print_poly
formatter["object"] = cls._print_poly
elif cls.display_mode == "power":
cls._set_print_power_vars(array)
formatter["int"] = cls._print_power
formatter["object"] = cls._print_power
elif array.dtype == np.object_:
formatter["object"] = cls._print_int
return formatter
def _print_int(cls, element): # pylint: disable=no-self-use
return f"{int(element)}"
def _print_poly(cls, element):
poly = integer_to_poly(element, cls.characteristic)
poly_var = "α" if cls.primitive_element == cls.characteristic else "x"
return poly_to_str(poly, poly_var=poly_var)
def _set_print_power_vars(cls, array):
nonzero_idxs = np.nonzero(array)
if array.ndim > 1:
max_power = np.max(cls._ufunc("log")(array[nonzero_idxs], cls.primitive_element))
if max_power > 1:
cls._display_power_width = 2 + len(str(max_power))
else:
cls._display_power_width = 1
else:
cls._display_power_width = None
def _print_power(cls, element):
if element == 0:
s = "0"
else:
power = cls._ufunc("log")(element, cls.primitive_element)
if power > 1:
s = f"α^{power}"
elif power == 1:
s = "α"
else:
s = "1"
if cls._display_power_width:
return s.rjust(cls._display_power_width)
else:
return s
###############################################################################
# Class attributes
###############################################################################
@property
def name(cls) -> str:
if cls._degree == 1:
return f"GF({cls._characteristic})"
else:
return f"GF({cls._characteristic}^{cls._degree})"
@property
def characteristic(cls) -> int:
return cls._characteristic
@property
def degree(cls) -> int:
return cls._degree
@property
def order(cls) -> int:
return cls._order
@property
def irreducible_poly(cls) -> "Poly":
# Ensure accesses of this property don't alter it
return cls._irreducible_poly.copy()
@property
def is_primitive_poly(cls) -> bool:
return cls._is_primitive_poly
@property
def primitive_element(cls) -> "FieldArray":
return cls(cls._primitive_element) # pylint: disable=no-value-for-parameter
@property
def primitive_elements(cls) -> "FieldArray":
n = cls.order - 1
totatives = [t for t in range(1, n + 1) if math.gcd(n, t) == 1]
powers = np.array(totatives)
return np.sort(cls.primitive_element ** powers)
@property
def quadratic_residues(cls) -> "FieldArray":
x = cls.Elements()
is_quadratic_residue = x.is_quadratic_residue()
return x[is_quadratic_residue]
@property
def quadratic_non_residues(cls) -> "FieldArray":
x = cls.Elements()
is_quadratic_residue = x.is_quadratic_residue()
return x[~is_quadratic_residue]
@property
def is_prime_field(cls) -> bool:
return cls._degree == 1
@property
def is_extension_field(cls) -> bool:
return cls._degree > 1
@property
def prime_subfield(cls) -> "FieldClass":
return cls._prime_subfield
@property
def dtypes(cls) -> List[np.dtype]:
return cls._dtypes
@property
def display_mode(cls) -> str:
return cls._display_mode
@property
def ufunc_mode(cls) -> str:
return cls._ufunc_mode
@property
def ufunc_modes(cls) -> List[str]:
if cls.dtypes == [np.object_]:
return ["python-calculate"]
else:
return ["jit-lookup", "jit-calculate"]
@property
def default_ufunc_mode(cls) -> str:
if cls.dtypes == [np.object_]:
return "python-calculate"
elif cls.order <= 2**20:
return "jit-lookup"
else:
return "jit-calculate"
@property
def properties(cls) -> str:
string = f"{cls.name}:"
string += f"\n characteristic: {cls.characteristic}"
string += f"\n degree: {cls.degree}"
string += f"\n order: {cls.order}"
string += f"\n irreducible_poly: {cls.irreducible_poly.string}"
string += f"\n is_primitive_poly: {cls.is_primitive_poly}"
string += f"\n primitive_element: {poly_to_str(integer_to_poly(cls.primitive_element, cls.characteristic))}"
return string
class DirMeta(type):
def __dir__(cls):
if isinstance(cls, FieldClass):
meta_dir = dir(type(cls))
classmethods = [attribute for attribute in super().__dir__() if attribute[0] != "_" and inspect.ismethod(getattr(cls, attribute))]
return sorted(meta_dir + classmethods)
else:
return super().__dir__()
class DisplayContext:
def __init__(self, cls):
# Save the previous state
self.cls = cls
self.mode = cls.display_mode
def __enter__(self):
# Don't need to do anything, we already set the new mode in the display() method
pass
def __exit__(self, exc_type, exc_value, traceback):
self.cls._display_mode = self.mode
@set_module("galois")
class FieldArray(np.ndarray, metaclass=FieldClass):
def __new__(
cls,
array: Union[int, str, Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None,
copy: bool = True,
order: Literal["K", "A", "C", "F"] = "K",
ndmin: int = 0
) -> "FieldArray":
if cls is FieldArray:
raise NotImplementedError("FieldArray is an abstract base class that cannot be directly instantiated. Instead, create a FieldArray subclass for GF(p^m) arithmetic using `GF = galois.GF(p**m)` and instantiate an array using `x = GF(array_like)`.")
return cls._array(array, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
def __init__(
self,
array: Union[int, str, Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None,
copy: bool = True,
order: Literal["K", "A", "C", "F"] = "K",
ndmin: int = 0
):
return
@classmethod
def _get_dtype(cls, dtype):
if dtype is None:
return cls.dtypes[0]
# Convert "dtype" to a numpy dtype. This does platform specific conversion, if necessary.
# For example, np.dtype(int) == np.int64 (on some systems).
dtype = np.dtype(dtype)
if dtype not in cls.dtypes:
raise TypeError(f"{cls.name} arrays only support dtypes {[np.dtype(d).name for d in cls.dtypes]}, not {dtype.name!r}.")
return dtype
@classmethod
def _array(cls, array_like, dtype=None, copy=True, order="K", ndmin=0):
dtype = cls._get_dtype(dtype)
array_like = cls._check_array_like_object(array_like)
array = np.array(array_like, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
return array.view(cls)
@classmethod
def _check_array_like_object(cls, array_like):
if isinstance(array_like, cls):
# If this was a previously-created and vetted array, there's no need to reverify
return array_like
if isinstance(array_like, str):
array_like = cls._check_string_value(array_like)
cls._check_array_values(array_like)
elif isinstance(array_like, (int, np.integer)):
# Just check that the single int is in range
cls._check_array_values(array_like)
elif isinstance(array_like, (list, tuple)):
# Recursively check the items in the iterable to ensure they're of the correct type
array_like = cls._check_iterable_types_and_values(array_like)
elif isinstance(array_like, np.ndarray):
if array_like.dtype == np.object_:
array_like = cls._check_array_types_dtype_object(array_like)
elif not np.issubdtype(array_like.dtype, np.integer):
raise TypeError(f"{cls.name} arrays must have integer dtypes, not {array_like.dtype}.")
cls._check_array_values(array_like)
else:
raise TypeError(f"{cls.name} arrays can be created with scalars of type int, not {type(array_like)}.")
return array_like
@classmethod
def _check_iterable_types_and_values(cls, iterable):
new_iterable = []
for item in iterable:
if isinstance(item, (list, tuple)):
item = cls._check_iterable_types_and_values(item)
new_iterable.append(item)
continue
if isinstance(item, str):
item = cls._check_string_value(item)
elif not isinstance(item, (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with an iterable, each element must be an integer. Found type {type(item)}.")
cls._check_array_values(item)
new_iterable.append(int(item))
return new_iterable
@classmethod
def _check_array_types_dtype_object(cls, array):
if array.size == 0:
return array
if array.ndim == 0:
if not isinstance(array[()], (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. Found type {type(array[()])}.")
return int(array)
iterator = np.nditer(array, flags=["multi_index", "refs_ok"])
for _ in iterator:
a = array[iterator.multi_index]
if not isinstance(a, (int, np.integer, FieldArray)):
raise TypeError(f"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. Found type {type(a)}.")
# Ensure the type is int so dtype=object classes don't get all mixed up
array[iterator.multi_index] = int(a)
return array
@classmethod
def _check_array_values(cls, array):
if not isinstance(array, np.ndarray):
array = np.array(array)
# Check the value of the "field elements" and make sure they are valid
if np.any(array < 0) or np.any(array >= cls.order):
idxs = np.logical_or(array < 0, array >= cls.order)
values = array if array.ndim == 0 else array[idxs]
raise ValueError(f"{cls.name} arrays must have elements in `0 <= x < {cls.order}`, not {values}.")
@classmethod
def _check_string_value(cls, string):
return str_to_integer(string, cls.prime_subfield)
###############################################################################
# Alternate constructors
###############################################################################
@classmethod
def Zeros(
cls,
shape: Union[int, Sequence[int]],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
dtype = cls._get_dtype(dtype)
array = np.zeros(shape, dtype=dtype)
return array.view(cls)
@classmethod
def Ones(
cls,
shape: Union[int, Sequence[int]],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
dtype = cls._get_dtype(dtype)
array = np.ones(shape, dtype=dtype)
return array.view(cls)
@classmethod
def Range(
cls,
start: int,
stop: int,
step: Optional[int] = 1,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
if not stop <= cls.order:
raise ValueError(f"The stopping value must be less than the field order of {cls.order}, not {stop}.")
dtype = cls._get_dtype(dtype)
array = np.arange(start, stop, step=step, dtype=dtype)
return array.view(cls)
@classmethod
def Random(
cls,
shape: Union[int, Sequence[int]] = (),
low: Optional[int] = 0,
high: Optional[int] = None,
seed: Optional[Union[int, np.random.Generator]] = None,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
dtype = cls._get_dtype(dtype)
high = cls.order if high is None else high
if not 0 <= low < high <= cls.order:
raise ValueError(f"Arguments must satisfy `0 <= low < high <= order`, not `0 <= {low} < {high} <= {cls.order}`.")
if seed is not None:
if not isinstance(seed, (int, np.integer, np.random.Generator)):
raise ValueError("Seed must be an integer, a numpy.random.Generator or None.")
if isinstance(seed, (int, np.integer)) and seed < 0:
raise ValueError("Seed must be non-negative.")
if dtype != np.object_:
rng = np.random.default_rng(seed)
array = rng.integers(low, high, shape, dtype=dtype)
else:
array = np.empty(shape, dtype=dtype)
iterator = np.nditer(array, flags=["multi_index", "refs_ok"])
_seed = None
if seed is not None:
if isinstance(seed, np.integer):
# np.integers not supported by random and seeding based on hashing deprecated since Python 3.9
_seed = seed.item()
elif isinstance(seed, np.random.Generator):
_seed = seed.bit_generator.state['state']['state']
seed.bit_generator.advance(1)
else: # int
_seed = seed
random.seed(_seed)
for _ in iterator:
array[iterator.multi_index] = random.randint(low, high - 1)
return array.view(cls)
@classmethod
def Elements(
cls,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
return cls.Range(0, cls.order, step=1, dtype=dtype)
@classmethod
def Identity(
cls,
size: int,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
dtype = cls._get_dtype(dtype)
array = np.identity(size, dtype=dtype)
return array.view(cls)
@classmethod
def Vandermonde(
cls,
a: Union[int, "FieldArray"],
m: int,
n: int,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
if not isinstance(a, (int, np.integer, cls)):
raise TypeError(f"Argument `a` must be an integer or element of {cls.name}, not {type(a)}.")
if not isinstance(m, (int, np.integer)):
raise TypeError(f"Argument `m` must be an integer, not {type(m)}.")
if not isinstance(n, (int, np.integer)):
raise TypeError(f"Argument `n` must be an integer, not {type(n)}.")
if not m > 0:
raise ValueError(f"Argument `m` must be non-negative, not {m}.")
if not n > 0:
raise ValueError(f"Argument `n` must be non-negative, not {n}.")
dtype = cls._get_dtype(dtype)
a = cls(a, dtype=dtype)
if not a.ndim == 0:
raise ValueError(f"Argument `a` must be a scalar, not {a.ndim}-D.")
v = a ** np.arange(0, m)
V = np.power.outer(v, np.arange(0, n))
return V
@classmethod
def Vector(
cls,
array: Union[Iterable, np.ndarray, "FieldArray"],
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
order = cls.prime_subfield.order
degree = cls.degree
array = cls.prime_subfield(array).view(np.ndarray).astype(cls.dtypes[-1]) # Use the largest dtype so computation doesn't overflow
if not array.shape[-1] == degree:
raise ValueError(f"The last dimension of `array` must be the field extension dimension {cls.degree}, not {array.shape[-1]}.")
degrees = np.arange(degree - 1, -1, -1, dtype=cls.dtypes[-1])
array = np.sum(array * order**degrees, axis=-1)
return cls(array, dtype=dtype)
def additive_order(self) -> Union[np.integer, np.ndarray]:
x = self
field = type(self)
if x.ndim == 0:
order = np.int64(1) if x == 0 else np.int64(field.characteristic)
else:
order = field.characteristic * np.ones(x.shape, dtype=np.int64)
order[np.where(x == 0)] = 1
return order
def multiplicative_order(self) -> Union[np.integer, np.ndarray]:
if not np.count_nonzero(self) == self.size:
raise ArithmeticError("The multiplicative order of 0 is not defined.")
x = self
field = type(self)
if field.ufunc_mode == "jit-lookup":
k = np.log(x) order = (field.order - 1) // np.gcd(field.order - 1, k)
else:
d = np.array(divisors(field.order - 1)) y = np.power.outer(x, d) idxs = np.argmin(np.abs(y.view(np.ndarray) - 1), axis=-1) order = d[idxs]
return order
def is_quadratic_residue(self) -> Union[np.bool_, np.ndarray]:
x = self
field = type(self)
if field.characteristic == 2:
return np.ones(x.shape, dtype=bool) if x.ndim > 0 else np.bool_(True)
else:
# Compute the Legendre symbol on each element
return x ** ((field.order - 1)//2) != field.characteristic - 1
def vector(
self,
dtype: Optional[Union[np.dtype, int, object]] = None
) -> "FieldArray":
order = type(self).prime_subfield.order
degree = type(self).degree
array = self.view(np.ndarray)
array = np.repeat(array, degree).reshape(*array.shape, degree)
x = 0
for i in range(degree):
q = (array[...,i] - x) // order**(degree - 1 - i)
array[...,i] = q
x += q*order**(degree - 1 - i)
return type(self).prime_subfield(array, dtype=dtype) # pylint: disable=unexpected-keyword-arg
def row_reduce(
self,
ncols: Optional[int] = None
) -> "FieldArray":
return row_reduce(self, ncols=ncols)
def lu_decompose(self) -> "FieldArray":
return lu_decompose(self)
def lup_decompose(self) -> "FieldArray":
return lup_decompose(self)
def field_trace(self) -> "FieldArray":
if not type(self).is_extension_field:
raise TypeError(f"The Galois field must be an extension field to compute the field trace, not {type(self)}.")
field = type(self)
subfield = field.prime_subfield
p = field.characteristic
m = field.degree
conjugates = np.power.outer(self, p**np.arange(0, m, dtype=field.dtypes[-1]))
trace = np.add.reduce(conjugates, axis=-1)
return subfield(trace)
def field_norm(self) -> "FieldArray":
if not type(self).is_extension_field:
raise TypeError(f"The Galois field must be an extension field to compute the field norm, not {type(self)}.")
field = type(self)
subfield = field.prime_subfield
p = field.characteristic
m = field.degree
norm = self**((p**m - 1) // (p - 1))
return subfield(norm)
def characteristic_poly(self) -> "Poly":
if self.ndim == 0:
return self._characteristic_poly_element()
elif self.ndim == 2:
return self._characteristic_poly_matrix()
else:
raise ValueError(f"The array must be either 0-D to return the characteristic polynomial of a single element or 2-D to return the characteristic polynomial of a square matrix, not have shape {self.shape}.")
def _characteristic_poly_element(self):
field = type(self)
a = self
x = Poly.Identity(field)
if field.is_prime_field:
return x - a
else:
powers = a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1]))
poly = Poly.Roots(powers, field=field)
poly = Poly(poly.coeffs, field=field.prime_subfield)
return poly
def _characteristic_poly_matrix(self):
if not self.shape[0] == self.shape[1]:
raise ValueError(f"The 2-D array must be square to compute its characteristic polynomial, not have shape {self.shape}.")
field = type(self)
A = self
# Compute P = xI - A
P = np.zeros(self.shape, dtype=object)
for i in range(self.shape[0]):
for j in range(self.shape[0]):
if i == j:
P[i,j] = Poly([1, -A[i,j]], field=field)
else:
P[i,j] = Poly([-A[i,j]], field=field)
# Compute det(P)
return self._compute_poly_det(P)
def _compute_poly_det(self, A):
if A.shape == (2,2):
return A[0,0]*A[1,1] - A[0,1]*A[1,0]
field = type(self)
n = A.shape[0] # Size of the nxn matrix
det = Poly.Zero(field)
for i in range(n):
idxs = np.delete(np.arange(0, n), i)
if i % 2 == 0:
det += A[0,i] * self._compute_poly_det(A[1:,idxs])
else:
det -= A[0,i] * self._compute_poly_det(A[1:,idxs])
return det
def minimal_poly(self) -> "Poly":
if self.ndim == 0:
return self._minimal_poly_element()
# elif self.ndim == 2:
# return self._minimal_poly_matrix()
else:
raise ValueError(f"The array must be either 0-D to return the minimal polynomial of a single element or 2-D to return the minimal polynomial of a square matrix, not have shape {self.shape}.")
def _minimal_poly_element(self):
field = type(self)
a = self
x = Poly.Identity(field)
if field.is_prime_field:
return x - a
else:
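            # The minimal polynomial has only the distinct Galois conjugates of a as its roots.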
conjugates = np.unique(a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1])))
poly = Poly.Roots(conjugates, field=field)
poly = Poly(poly.coeffs, field=field.prime_subfield)
return poly
###############################################################################
# Special methods (redefined to add docstrings)
###############################################################################
def __add__(self, other): # pylint: disable=useless-super-delegation
return super().__add__(other)
def __sub__(self, other): # pylint: disable=useless-super-delegation
return super().__sub__(other)
def __mul__(self, other): # pylint: disable=useless-super-delegation
return super().__mul__(other)
def __truediv__(self, other): # pylint: disable=useless-super-delegation
return super().__truediv__(other)
def __floordiv__(self, other): # pylint: disable=useless-super-delegation
return super().__floordiv__(other) # pylint: disable=too-many-function-args
def __divmod__(self, other): # pylint: disable=useless-super-delegation
return super().__divmod__(other)
def __mod__(self, other): # pylint: disable=useless-super-delegation
return super().__mod__(other)
def __pow__(self, other):
# NOTE: Calling power here instead of `super().__pow__(other)` because when doing so `x ** GF(2)` will invoke `np.square(x)` and not throw
# an error. This way `np.power(x, GF(2))` is called which correctly checks whether the second argument is an integer.
return np.power(self, other)
###############################################################################
# Overridden numpy methods
###############################################################################
def __array_finalize__(self, obj):
if obj is not None and not isinstance(obj, FieldArray):
# Only invoked on view casting
if obj.dtype not in type(self).dtypes:
raise TypeError(f"{type(self).name} can only have integer dtypes {type(self).dtypes}, not {obj.dtype}.")
self._check_array_values(obj)
def __getitem__(self, key):
item = super().__getitem__(key)
if np.isscalar(item):
# Return scalar array elements as 0-dimensional Galois field arrays. This enables Galois field arithmetic
# on scalars, which would otherwise be implemented using standard integer arithmetic.
item = self.__class__(item, dtype=self.dtype)
return item
def __setitem__(self, key, value):
# Verify the values to be written to the Galois field array are in the field
value = self._check_array_like_object(value)
super().__setitem__(key, value)
def __array_function__(self, func, types, args, kwargs):
if func in type(self)._OVERRIDDEN_FUNCTIONS:
output = getattr(type(self), type(self)._OVERRIDDEN_FUNCTIONS[func])(*args, **kwargs)
elif func in type(self)._OVERRIDDEN_LINALG_FUNCTIONS:
output = type(self)._OVERRIDDEN_LINALG_FUNCTIONS[func](*args, **kwargs)
elif func in type(self)._UNSUPPORTED_FUNCTIONS:
raise NotImplementedError(f"The numpy function {func.__name__!r} is not supported on Galois field arrays. If you believe this function should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.\n\nIf you'd like to perform this operation on the data (but not necessarily a Galois field array), you should first call `array = array.view(np.ndarray)` and then call the function.")
else:
if func is np.insert:
args = list(args)
args[2] = self._check_array_like_object(args[2])
args = tuple(args)
output = super().__array_function__(func, types, args, kwargs)
if func in type(self)._FUNCTIONS_REQUIRING_VIEW:
output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)
return output
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
meta = {}
meta["types"] = [type(inputs[i]) for i in range(len(inputs))]
meta["operands"] = list(range(len(inputs)))
if method in ["at", "reduceat"]:
meta["operands"].pop(1)
meta["field_operands"] = [i for i in meta["operands"] if isinstance(inputs[i], self.__class__)]
meta["non_field_operands"] = [i for i in meta["operands"] if not isinstance(inputs[i], self.__class__)]
meta["field"] = self.__class__
meta["dtype"] = self.dtype
if ufunc in type(self)._OVERRIDDEN_UFUNCS:
if method not in ["reduce", "accumulate", "at", "reduceat"]:
kwargs["casting"] = "unsafe"
if method in ["reduce"]:
kwargs["dtype"] = type(self).dtypes[-1]
return getattr(type(self), type(self)._OVERRIDDEN_UFUNCS[ufunc])(ufunc, method, inputs, kwargs, meta)
elif ufunc in type(self)._UNSUPPORTED_UFUNCS:
raise NotImplementedError(f"The numpy ufunc {ufunc.__name__!r} is not supported on {type(self).name} arrays. If you believe this ufunc should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.")
else:
if ufunc in [np.bitwise_and, np.bitwise_or, np.bitwise_xor] and method not in ["reduce", "accumulate", "at", "reduceat"]:
kwargs["casting"] = "unsafe"
inputs, kwargs = type(self)._view_inputs_as_ndarray(inputs, kwargs)
output = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
if ufunc in type(self)._UFUNCS_REQUIRING_VIEW and output is not None:
output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)
return output
    def astype(self, dtype, **kwargs):
        if dtype not in type(self).dtypes:
raise TypeError(f"{type(self).name} arrays can only be cast as integer dtypes in {type(self).dtypes}, not {dtype}.")
return super().astype(dtype, **kwargs)
def dot(self, b, out=None):
return dot(self, b, out=out)
def __str__(self):
return self.__repr__()
def __repr__(self):
formatter = type(self)._formatter(self)
cls = type(self)
class_name = cls.__name__
with np.printoptions(formatter=formatter):
            cls.__name__ = "GF"
            string = super().__repr__()
cls.__name__ = class_name
# Remove the dtype from the repr and add the Galois field order
dtype_idx = string.find("dtype")
if dtype_idx == -1:
string = string[:-1] + f", {cls._order_str})"
else:
string = string[:dtype_idx] + f"{cls._order_str})"
return string
###############################################################################
# Special GF2 FieldArray subclass
###############################################################################
class GF2Meta(FieldClass, DirMeta):
# pylint: disable=no-value-for-parameter
# Need to have a unique cache of "calculate" functions for GF(2)
_FUNC_CACHE_CALCULATE = {}
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
cls._prime_subfield = cls
cls._is_primitive_poly = True
cls.compile(kwargs["compile"])
@property
def ufunc_modes(cls):
return ["jit-calculate"]
@property
def default_ufunc_mode(cls):
return "jit-calculate"
def _compile_ufuncs(cls):
super()._compile_ufuncs()
assert cls.ufunc_mode == "jit-calculate"
cls._ufuncs["add"] = np.bitwise_xor
cls._ufuncs["negative"] = np.positive
cls._ufuncs["subtract"] = np.bitwise_xor
cls._ufuncs["multiply"] = np.bitwise_and
cls._ufuncs["reciprocal"] = np.positive
cls._ufuncs["divide"] = np.bitwise_and
###############################################################################
# Override ufunc routines to use native numpy bitwise ufuncs for GF(2)
# arithmetic, which is faster than custom ufuncs
###############################################################################
def _ufunc_routine_reciprocal(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_unary_method_not_reduction(ufunc, method)
if np.count_nonzero(inputs[0]) != inputs[0].size:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
output = getattr(cls._ufunc("reciprocal"), method)(*inputs, **kwargs)
return output
def _ufunc_routine_divide(cls, ufunc, method, inputs, kwargs, meta):
cls._verify_operands_in_same_field(ufunc, inputs, meta)
if np.count_nonzero(inputs[meta["operands"][-1]]) != inputs[meta["operands"][-1]].size:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
output = getattr(cls._ufunc("divide"), method)(*inputs, **kwargs)
output = cls._view_output_as_field(output, meta["field"], meta["dtype"])
return output
def _ufunc_routine_square(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument
cls._verify_unary_method_not_reduction(ufunc, method)
return inputs[0]
###############################################################################
# Arithmetic functions using explicit calculation
###############################################################################
@staticmethod
def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
return a ^ b
@staticmethod
def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
return a
@staticmethod
def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
return a ^ b
@staticmethod
def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
return a & b
@staticmethod
def _reciprocal_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
return 1
@staticmethod
def _divide_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
return a & b
@staticmethod
@numba.extending.register_jitable
def _power_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
if b == 0:
return 1
else:
return a
@staticmethod
@numba.extending.register_jitable
def _log_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
if a == 0:
raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
if b != 1:
raise ArithmeticError("In GF(2), 1 is the only multiplicative generator.")
return 0
###############################################################################
# Ufuncs written in NumPy operations (not JIT compiled)
###############################################################################
@staticmethod
def _sqrt(a):
return a.copy()
@set_module("galois")
class GF2(FieldArray, metaclass=GF2Meta, characteristic=2, degree=1, order=2, primitive_element=1, compile="jit-calculate"):
    pass
###############################################################################
# Polynomials over Galois fields
###############################################################################
# Values were obtained by running scripts/sparse_poly_performance_test.py
SPARSE_VS_BINARY_POLY_FACTOR = 0.00_05
SPARSE_VS_BINARY_POLY_MIN_COEFFS = int(1 / SPARSE_VS_BINARY_POLY_FACTOR)
SPARSE_VS_DENSE_POLY_FACTOR = 0.00_5
SPARSE_VS_DENSE_POLY_MIN_COEFFS = int(1 / SPARSE_VS_DENSE_POLY_FACTOR)
@set_module("galois")
class Poly:
# pylint: disable=too-many-public-methods
# Increase my array priority so numpy will call my __radd__ instead of its own __add__
__array_priority__ = 100
def __new__(
cls,
coeffs: Union[Tuple[int], List[int], np.ndarray, FieldArray],
field: Optional[FieldClass] = None,
order: Literal["desc", "asc"] = "desc"
) -> "Poly":
if not isinstance(coeffs, (list, tuple, np.ndarray, FieldArray)):
            raise TypeError(f"Argument `coeffs` must be array-like, not {type(coeffs)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {field}.")
if not isinstance(order, str):
raise TypeError(f"Argument `order` must be a str, not {type(order)}.")
if isinstance(coeffs, (FieldArray, np.ndarray)) and not coeffs.ndim <= 1:
raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
if not order in ["desc", "asc"]:
raise ValueError(f"Argument `order` must be either 'desc' or 'asc', not {order!r}.")
if isinstance(coeffs, (FieldArray, np.ndarray)):
coeffs = np.atleast_1d(coeffs)
if order == "asc":
coeffs = coeffs[::-1] # Ensure it's in descending-degree order
coeffs, field = cls._convert_coeffs(coeffs, field)
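        # Select the most efficient backing representation: sparse storage for very sparse, high-degree polynomials,
        # integer bit-packing for polynomials over GF(2), and a dense coefficient array otherwise.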
if field is GF2:
if len(coeffs) >= SPARSE_VS_BINARY_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_BINARY_POLY_FACTOR*len(coeffs):
degrees = np.arange(coeffs.size - 1, -1, -1)
return SparsePoly(degrees, coeffs, field=field)
else:
integer = poly_to_integer(coeffs, 2)
return BinaryPoly(integer)
else:
if len(coeffs) >= SPARSE_VS_DENSE_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_DENSE_POLY_FACTOR*len(coeffs):
degrees = np.arange(coeffs.size - 1, -1, -1)
return SparsePoly(degrees, coeffs, field=field)
else:
return DensePoly(coeffs, field=field)
@classmethod
def _convert_coeffs(cls, coeffs, field):
if isinstance(coeffs, FieldArray) and field is None:
field = type(coeffs)
else:
field = GF2 if field is None else field
coeffs = np.array(coeffs, dtype=field.dtypes[-1])
idxs = coeffs < 0
coeffs = field(np.abs(coeffs))
coeffs[idxs] *= -1
return coeffs, field
@classmethod
def Zero(cls, field: Optional[FieldClass] = GF2) -> "Poly":
return Poly([0], field=field)
@classmethod
def One(cls, field: Optional[FieldClass] = GF2) -> "Poly":
return Poly([1], field=field)
@classmethod
def Identity(cls, field: Optional[FieldClass] = GF2) -> "Poly":
return Poly([1, 0], field=field)
@classmethod
def Random(
cls,
degree: int,
seed: Optional[Union[int, np.random.Generator]] = None,
field: Optional[FieldClass] = GF2
) -> "Poly":
if not isinstance(degree, (int, np.integer)):
raise TypeError(f"Argument `degree` must be an integer, not {type(degree)}.")
if seed is not None:
if not isinstance(seed, (int, np.integer, np.random.Generator)):
raise ValueError("Seed must be an integer, a numpy.random.Generator or None.")
if isinstance(seed, (int, np.integer)) and seed < 0:
raise ValueError("Seed must be non-negative.")
if not isinstance(field, FieldClass):
raise TypeError(f"Argument `field` must be a Galois field class, not {type(field)}.")
if not degree >= 0:
raise ValueError(f"Argument `degree` must be non-negative, not {degree}.")
        rng = np.random.default_rng(seed)
        coeffs = field.Random(degree + 1, seed=rng)
if coeffs[0] == 0:
coeffs[0] = field.Random(low=1, seed=rng)
return Poly(coeffs, field=field)
@classmethod
def Integer(cls, integer: int, field: Optional[FieldClass] = GF2) -> "Poly":
if not isinstance(integer, (int, np.integer)):
            raise TypeError(f"Argument `integer` must be an integer, not {type(integer)}.")
if not isinstance(field, FieldClass):
raise TypeError(f"Argument `field` must be a Galois field class, not {type(field)}.")
if not integer >= 0:
raise ValueError(f"Argument `integer` must be non-negative, not {integer}.")
if field is GF2:
return BinaryPoly(integer)
else:
coeffs = integer_to_poly(integer, field.order)
return Poly(coeffs, field=field)
@classmethod
def String(cls, string: str, field: Optional[FieldClass] = GF2) -> "Poly":
if not isinstance(string, str):
            raise TypeError(f"Argument `string` must be a string, not {type(string)}.")
return Poly.Degrees(*str_to_sparse_poly(string), field=field)
@classmethod
def Degrees(
cls,
degrees: Union[Tuple[int], List[int], np.ndarray],
coeffs: Optional[Union[Tuple[int], List[int], np.ndarray, FieldArray]] = None,
field: Optional[FieldClass] = None
) -> "Poly":
if not isinstance(degrees, (list, tuple, np.ndarray)):
            raise TypeError(f"Argument `degrees` must be array-like, not {type(degrees)}.")
if not isinstance(coeffs, (type(None), list, tuple, np.ndarray, FieldArray)):
            raise TypeError(f"Argument `coeffs` must be array-like, not {type(coeffs)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {type(field)}.")
degrees = np.array(degrees, dtype=np.int64)
coeffs = [1,]*len(degrees) if coeffs is None else coeffs
coeffs, field = cls._convert_coeffs(coeffs, field)
if not degrees.ndim <= 1:
raise ValueError(f"Argument `degrees` can have dimension at most 1, not {degrees.ndim}.")
if not degrees.size == np.unique(degrees).size:
raise ValueError(f"Argument `degrees` must have unique entries, not {degrees}.")
if not np.all(degrees >= 0):
raise ValueError(f"Argument `degrees` must have non-negative values, not {degrees}.")
if not coeffs.ndim <= 1:
raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
if not degrees.size == coeffs.size:
raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {degrees.size} and {coeffs.size}.")
if len(degrees) == 0:
degrees, coeffs = np.array([0]), field([0])
if field is GF2:
if len(degrees) < SPARSE_VS_BINARY_POLY_FACTOR*max(degrees):
# Explicitly create a sparse poly over GF(2)
return SparsePoly(degrees, coeffs=coeffs, field=field)
else:
integer = sparse_poly_to_integer(degrees, coeffs, 2)
return BinaryPoly(integer)
else:
if len(degrees) < SPARSE_VS_DENSE_POLY_FACTOR*max(degrees):
# Explicitly create a sparse poly over GF(p^m)
return SparsePoly(degrees, coeffs=coeffs, field=field)
else:
degree = max(degrees) # The degree of the polynomial
all_coeffs = type(coeffs).Zeros(degree + 1)
all_coeffs[degree - degrees] = coeffs
return DensePoly(all_coeffs)
@classmethod
def Roots(
cls,
roots: Union[Tuple[int], List[int], np.ndarray, FieldArray],
multiplicities: Optional[Union[Tuple[int], List[int], np.ndarray]] = None,
field: Optional[FieldClass] = None
) -> "Poly":
multiplicities = [1,]*len(roots) if multiplicities is None else multiplicities
if not isinstance(roots, (tuple, list, np.ndarray, FieldArray)):
raise TypeError(f"Argument `roots` must be array-like, not {type(roots)}.")
if not isinstance(multiplicities, (tuple, list, np.ndarray)):
raise TypeError(f"Argument `multiplicities` must be array-like, not {type(multiplicities)}.")
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {field}.")
roots, field = cls._convert_coeffs(roots, field)
roots = field(roots).flatten()
if not len(roots) == len(multiplicities):
raise ValueError(f"Arguments `roots` and `multiplicities` must have the same length, not {len(roots)} and {len(multiplicities)}.")
poly = Poly.One(field=field)
x = Poly.Identity(field=field)
for root, multiplicity in zip(roots, multiplicities):
poly *= (x - root)**multiplicity
return poly
###############################################################################
# Methods
###############################################################################
def coefficients(
self,
size: Optional[int] = None,
order: Literal["desc", "asc"] = "desc"
) -> FieldArray:
if not isinstance(size, (type(None), int, np.integer)):
raise TypeError(f"Argument `size` must be an integer, not {type(size)}.")
if not isinstance(order, str):
raise TypeError(f"Argument `order` must be a str, not {type(order)}.")
size = len(self) if size is None else size
if not size >= len(self):
raise ValueError(f"Argument `size` must be at least `degree + 1` which is {len(self)}, not {size}.")
if not order in ["desc", "asc"]:
raise ValueError(f"Argument `order` must be either 'desc' or 'asc', not {order!r}.")
coeffs = self.field.Zeros(size)
coeffs[-len(self):] = self.coeffs
if order == "asc":
coeffs = np.flip(coeffs)
return coeffs
def copy(self) -> "Poly":
raise NotImplementedError
def reverse(self) -> "Poly":
return Poly(self.coeffs[::-1])
def roots(self, multiplicity: bool = False) -> FieldArray:
if not isinstance(multiplicity, bool):
raise TypeError(f"Argument `multiplicity` must be a bool, not {type(multiplicity)}.")
roots = self.field._poly_roots(self.nonzero_degrees, self.nonzero_coeffs)
if not multiplicity:
return roots
else:
multiplicities = np.array([self._root_multiplicity(root) for root in roots])
return roots, multiplicities
def _root_multiplicity(self, root):
poly = self.copy()
multiplicity = 1
while True:
            # If the root is also a root of the derivative, then it's a multiple root.
poly = poly.derivative()
if poly == 0:
                # Cannot test whether p'(root) = 0 because p'(x) = 0. We've exhausted the non-zero derivatives. For multiplicities
                # greater than the field's characteristic, we need to factor the polynomial. Here we factor out (x - root)^m, where m is the current multiplicity.
poly = self.copy() // (Poly([1, -root], field=self.field)**multiplicity)
if poly(root) == 0:
multiplicity += 1
else:
break
return multiplicity
def derivative(self, k: int = 1) -> "Poly":
if not isinstance(k, (int, np.integer)):
raise TypeError(f"Argument `k` must be an integer, not {type(k)}.")
if not k > 0:
raise ValueError(f"Argument `k` must be a positive integer, not {k}.")
if 0 in self.nonzero_degrees:
degrees = self.nonzero_degrees[:-1] - 1
            coeffs = self.nonzero_coeffs[:-1] * self.nonzero_degrees[:-1]
        else:
degrees = self.nonzero_degrees - 1
coeffs = self.nonzero_coeffs * self.nonzero_degrees
p_prime = Poly.Degrees(degrees, coeffs, field=self.field)
k -= 1
if k > 0:
return p_prime.derivative(k)
else:
return p_prime
def __str__(self):
return f"Poly({self.string}, {self.field.name})"
def __repr__(self):
return str(self)
def __hash__(self):
t = tuple([self.field.order,] + self.nonzero_degrees.tolist() + self.nonzero_coeffs.tolist())
return hash(t)
def __call__(self, x: FieldArray, field: Optional[FieldClass] = None, elementwise: bool = True) -> FieldArray:
if not isinstance(field, (type(None), FieldClass)):
raise TypeError(f"Argument `field` must be a Galois field array class, not {type(field)}.")
field = self.field if field is None else field
coeffs = field(self.coeffs)
x = field(x)
if elementwise:
return field._poly_evaluate(coeffs, x)
else:
if not (x.ndim == 2 and x.shape[0] == x.shape[1]):
raise ValueError(f"Argument `x` must be a square matrix when evaluating the polynomial not elementwise, not have shape {x.shape}.")
return field._poly_evaluate_matrix(coeffs, x)
def __len__(self) -> int:
return self.degree + 1
def _check_inputs_are_polys(self, a, b):
if not isinstance(a, (Poly, self.field)):
raise TypeError(f"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(a)}.")
if not isinstance(b, (Poly, self.field)):
raise TypeError(f"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(b)}.")
if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:
raise TypeError(f"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.")
def _check_inputs_are_polys_or_ints(self, a, b):
if not isinstance(a, (Poly, self.field, int, np.integer)):
raise TypeError(f"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(a)}.")
if not isinstance(b, (Poly, self.field, int, np.integer)):
raise TypeError(f"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(b)}.")
if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:
raise TypeError(f"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.")
def _convert_field_scalars_to_polys(self, a, b):
if isinstance(a, self.field):
if not a.size == 1:
raise ValueError(f"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {a.size}.")
a = Poly(np.atleast_1d(a))
if isinstance(b, self.field):
if not b.size == 1:
raise ValueError(f"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {b.size}.")
b = Poly(np.atleast_1d(b))
return a, b
@staticmethod
def _determine_poly_class(a, b):
if isinstance(a, SparsePoly) or isinstance(b, SparsePoly):
return SparsePoly
elif isinstance(a, BinaryPoly) or isinstance(b, BinaryPoly):
return BinaryPoly
else:
return DensePoly
def __add__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._add(a, b)
def __radd__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._add(b, a)
def __sub__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._sub(a, b)
def __rsub__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._sub(b, a)
def __mul__(self, other):
self._check_inputs_are_polys_or_ints(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
if isinstance(a, (int, np.integer)):
a, b = b, a
cls = self._determine_poly_class(a, b)
return cls._mul(a, b)
def __rmul__(self, other):
self._check_inputs_are_polys_or_ints(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
if isinstance(b, (int, np.integer)):
b, a = a, b
cls = self._determine_poly_class(a, b)
return cls._mul(b, a)
def __divmod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)
def __rdivmod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)
def __truediv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)[0]
def __rtruediv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)[0]
def __floordiv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(a, b)[0]
def __rfloordiv__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._divmod(b, a)[0]
def __mod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._mod(a, b)
def __rmod__(self, other):
self._check_inputs_are_polys(self, other)
a, b = self._convert_field_scalars_to_polys(self, other)
cls = self._determine_poly_class(a, b)
return cls._mod(b, a)
def __pow__(self, other):
if not isinstance(other, (int, np.integer)):
raise TypeError(f"For polynomial exponentiation, the second argument must be an int, not {other}.")
if not other >= 0:
raise ValueError(f"Can only exponentiate polynomials to non-negative integers, not {other}.")
a, power = self, other
field = self.field
if power == 0:
return Poly.One(field)
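        # Exponentiate by repeated squaring: c_square tracks the running square, c_mult accumulates the odd factors.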
        c_square = a
        c_mult = Poly.One(field)
while power > 1:
if power % 2 == 0:
c_square *= c_square
power //= 2
else:
c_mult *= c_square
power -= 1
c = c_mult * c_square
return c
def __neg__(self):
raise NotImplementedError
def __eq__(self, other):
if isinstance(other, (int, np.integer)):
return self.degree == 0 and np.array_equal(self.coeffs, [other])
elif isinstance(other, FieldArray):
if not other.ndim == 0:
raise ValueError(f"Can only compare galois.Poly to a 0-D galois.FieldArray scalar, not shape {other.shape}.")
return self.field is type(other) and self.degree == 0 and np.array_equal(self.coeffs, np.atleast_1d(other))
elif not isinstance(other, Poly):
raise TypeError(f"Can only compare galois.Poly and galois.Poly / int / galois.FieldArray scalar objects, not {type(other)}.")
else:
return self.field is other.field and np.array_equal(self.nonzero_degrees, other.nonzero_degrees) and np.array_equal(self.nonzero_coeffs, other.nonzero_coeffs)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _add(cls, a, b):
raise NotImplementedError
@classmethod
def _sub(cls, a, b):
raise NotImplementedError
@classmethod
def _mul(cls, a, b):
raise NotImplementedError
@classmethod
def _divmod(cls, a, b):
raise NotImplementedError
@classmethod
def _mod(cls, a, b):
raise NotImplementedError
@property
def field(self) -> FieldClass:
raise NotImplementedError
@property
def degree(self) -> int:
raise NotImplementedError
@property
def nonzero_degrees(self) -> np.ndarray:
raise NotImplementedError
@property
def nonzero_coeffs(self) -> FieldArray:
raise NotImplementedError
@property
def degrees(self) -> np.ndarray:
raise NotImplementedError
@property
def coeffs(self) -> FieldArray:
raise NotImplementedError
@property
def integer(self) -> int:
return sparse_poly_to_integer(self.nonzero_degrees, self.nonzero_coeffs, self.field.order)
@property
def string(self) -> str:
return sparse_poly_to_str(self.nonzero_degrees, self.nonzero_coeffs)
class DensePoly(Poly):
__slots__ = ["_coeffs"]
    def __new__(cls, coeffs, field=None):
        obj = object.__new__(cls)
obj._coeffs = coeffs
if obj._coeffs.size > 1:
# Remove leading zero coefficients
idxs = np.nonzero(obj._coeffs)[0]
if idxs.size > 0:
obj._coeffs = obj._coeffs[idxs[0]:]
else:
obj._coeffs = obj._coeffs[-1]
# Ensure the coefficient array isn't 0-dimensional
obj._coeffs = np.atleast_1d(obj._coeffs)
return obj
def copy(self):
return DensePoly(self._coeffs.copy())
def __neg__(self):
return DensePoly(-self._coeffs)
@classmethod
def _add(cls, a, b):
field = a.field
c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))
c_coeffs[-a.coeffs.size:] = a.coeffs
c_coeffs[-b.coeffs.size:] += b.coeffs
return Poly(c_coeffs)
@classmethod
def _sub(cls, a, b):
field = a.field
c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))
c_coeffs[-a.coeffs.size:] = a.coeffs
c_coeffs[-b.coeffs.size:] -= b.coeffs
return Poly(c_coeffs)
@classmethod
def _mul(cls, a, b):
if isinstance(b, (int, np.integer)):
c_coeffs = a.coeffs * b
else:
c_coeffs = np.convolve(a.coeffs, b.coeffs)
return Poly(c_coeffs)
@classmethod
def _divmod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
if b.degree == 0:
return Poly(a.coeffs // b.coeffs), zero
elif a == 0:
return zero, zero
elif a.degree < b.degree:
return zero, a.copy()
else:
q_coeffs, r_coeffs = field._poly_divmod(a.coeffs, b.coeffs)
return Poly(q_coeffs), Poly(r_coeffs)
@classmethod
def _mod(cls, a, b):
return cls._divmod(a, b)[1]
@property
def field(self):
return type(self._coeffs)
@property
def degree(self):
return self._coeffs.size - 1
@property
def nonzero_degrees(self):
return self.degree - np.nonzero(self._coeffs)[0]
@property
def nonzero_coeffs(self):
return self._coeffs[np.nonzero(self._coeffs)[0]]
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
return self._coeffs.copy()
class BinaryPoly(Poly):
__slots__ = ["_integer", "_coeffs"]
    def __new__(cls, integer):
        if not isinstance(integer, (int, np.integer)):
raise TypeError(f"Argument `integer` must be an integer, not {type(integer)}.")
if not integer >= 0:
raise ValueError(f"Argument `integer` must be non-negative, not {integer}.")
obj = object.__new__(cls)
obj._integer = integer
obj._coeffs = None
return obj
def copy(self):
return BinaryPoly(self._integer)
def __neg__(self):
return self.copy()
@classmethod
def _add(cls, a, b):
return BinaryPoly(a.integer ^ b.integer)
@classmethod
def _sub(cls, a, b):
return BinaryPoly(a.integer ^ b.integer)
@classmethod
def _mul(cls, a, b):
if isinstance(b, (int, np.integer)):
return BinaryPoly(a.integer) if b % 2 == 1 else BinaryPoly(0)
else:
a = a.integer
b = b.integer
if b > a:
a, b = b, a
c = 0
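            # Carry-less multiplication over GF(2): for each set bit of b, XOR the (shifted) value of a into the product.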
while b > 0:
if b & 0b1:
                    c ^= a
                b >>= 1
                a <<= 1
return BinaryPoly(c)
@classmethod
def _divmod(cls, a, b):
deg_a = a.degree
deg_q = a.degree - b.degree
deg_r = b.degree - 1
a = a.integer
b = b.integer
q = 0
mask = 1 << deg_a
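        # Polynomial long division over GF(2): clear the dividend's leading bit at each step by XOR-ing in the shifted divisor.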
for i in range(deg_q, -1, -1):
q <<= 1
if a & mask:
a ^= b << i
                q ^= 1
                assert a & mask == 0
mask >>= 1
        mask = (1 << (deg_r + 1)) - 1
        r = a & mask
return BinaryPoly(q), BinaryPoly(r)
@classmethod
def _mod(cls, a, b):
return cls._divmod(a, b)[1]
@property
def field(self):
return GF2
@property
def degree(self):
if self._integer == 0:
return 0
else:
return len(bin(self._integer)[2:]) - 1
@property
def nonzero_degrees(self):
return self.degree - np.nonzero(self.coeffs)[0]
@property
def nonzero_coeffs(self):
return self.coeffs[np.nonzero(self.coeffs)[0]]
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
if self._coeffs is None:
binstr = bin(self._integer)[2:]
self._coeffs = GF2([int(b) for b in binstr])
return self._coeffs.copy()
@property
def integer(self):
return self._integer
class SparsePoly(Poly):
__slots__ = ["_degrees", "_coeffs"]
    def __new__(cls, degrees, coeffs=None, field=None):
        coeffs = [1,]*len(degrees) if coeffs is None else coeffs
if not isinstance(degrees, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `degrees` must be array-like, not {type(degrees)}.")
if not isinstance(coeffs, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `coeffs` must be array-like, not {type(coeffs)}.")
if not len(degrees) == len(coeffs):
raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {len(degrees)} and {len(coeffs)}.")
if not all(degree >= 0 for degree in degrees):
raise ValueError(f"Argument `degrees` must have non-negative values, not {degrees}.")
obj = object.__new__(cls)
if isinstance(coeffs, FieldArray) and field is None:
obj._degrees = np.array(degrees)
obj._coeffs = coeffs
else:
field = GF2 if field is None else field
if isinstance(coeffs, np.ndarray):
coeffs = coeffs.tolist()
obj._degrees = np.array(degrees)
obj._coeffs = field([-field(abs(c)) if c < 0 else field(c) for c in coeffs])
idxs = np.argsort(degrees)[::-1]
obj._degrees = obj._degrees[idxs]
obj._coeffs = obj._coeffs[idxs]
idxs = np.nonzero(obj._coeffs)[0]
obj._degrees = obj._degrees[idxs]
obj._coeffs = obj._coeffs[idxs]
return obj
def copy(self):
return SparsePoly(self.degrees, self.coeffs)
def reverse(self):
return SparsePoly(self.degree - self.degrees, self.coeffs)
def __neg__(self):
return SparsePoly(self._degrees, -self._coeffs)
@classmethod
def _add(cls, a, b):
field = a.field
cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[b_degree] = cc.get(b_degree, field(0)) + b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _sub(cls, a, b):
field = a.field
cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[b_degree] = cc.get(b_degree, field(0)) - b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _mul(cls, a, b):
field = a.field
if isinstance(b, (int, np.integer)):
return Poly.Degrees(a.nonzero_degrees, a.nonzero_coeffs * b)
else:
cc = {}
for a_degree, a_coeff in zip(a.nonzero_degrees, a.nonzero_coeffs):
for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):
cc[a_degree + b_degree] = cc.get(a_degree + b_degree, field(0)) + a_coeff*b_coeff
return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)
@classmethod
def _divmod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
if b.degree == 0:
q_degrees = a.nonzero_degrees
q_coeffs = [a_coeff // b.coeffs[0] for a_coeff in a.nonzero_coeffs]
return Poly.Degrees(q_degrees, q_coeffs, field=field), zero
elif a == 0:
return zero, zero
elif a.degree < b.degree:
return zero, a.copy()
else:
aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
b_coeffs = b.coeffs
q_degree = a.degree - b.degree
            r_degree = b.degree
            qq = {}
r_coeffs = field.Zeros(r_degree + 1)
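            # r_coeffs is a sliding window over the dividend's coefficients; each iteration shifts in the next
            # coefficient and subtracts a multiple of the divisor, performing synthetic long division.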
for i in range(0, b.degree):
r_coeffs[1 + i] = aa.get(a.degree - i, 0)
for i in range(0, q_degree + 1):
r_coeffs = np.roll(r_coeffs, -1)
r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)
if r_coeffs[0] > 0:
q = r_coeffs[0] // b_coeffs[0]
r_coeffs -= q*b_coeffs
qq[q_degree - i] = q
return Poly.Degrees(list(qq.keys()), list(qq.values()), field=field), Poly(r_coeffs[1:])
@classmethod
def _mod(cls, a, b):
field = a.field
zero = Poly.Zero(field)
if b.degree == 0:
return zero
elif a == 0:
return zero
elif a.degree < b.degree:
return a.copy()
else:
aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))
b_coeffs = b.coeffs
q_degree = a.degree - b.degree
            r_degree = b.degree
            r_coeffs = field.Zeros(r_degree + 1)
for i in range(0, b.degree):
r_coeffs[1 + i] = aa.get(a.degree - i, 0)
for i in range(0, q_degree + 1):
r_coeffs = np.roll(r_coeffs, -1)
r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)
if r_coeffs[0] > 0:
q = r_coeffs[0] // b_coeffs[0]
r_coeffs -= q*b_coeffs
return Poly(r_coeffs[1:])
@property
def field(self):
return type(self._coeffs)
@property
def degree(self):
return 0 if self._degrees.size == 0 else int(np.max(self._degrees))
@property
def nonzero_degrees(self):
return self._degrees.copy()
@property
def nonzero_coeffs(self):
return self._coeffs.copy()
@property
def degrees(self):
return np.arange(self.degree, -1, -1)
@property
def coeffs(self):
coeffs = self.field.Zeros(self.degree + 1)
if self.nonzero_degrees.size > 0:
coeffs[self.degree - self.nonzero_degrees] = self.nonzero_coeffs
return coeffs
GF2._irreducible_poly = Poly([1, 1]) | true | true |
1c4aea5dca4be7bf3e49a80f9d93695e5aff40c9 | 8,347 | py | Python | cwmud/contrib/worldgen/terrain.py | whutch/cwmud | bee8b126a5e70edd0593dae9753a6be8d52357cf | [
"MIT"
] | 11 | 2016-03-03T03:56:59.000Z | 2021-11-19T15:38:51.000Z | cwmud/contrib/worldgen/terrain.py | whutch/atria | bee8b126a5e70edd0593dae9753a6be8d52357cf | [
"MIT"
] | 26 | 2016-08-31T23:19:45.000Z | 2019-10-19T21:50:33.000Z | cwmud/contrib/worldgen/terrain.py | whutch/atria | bee8b126a5e70edd0593dae9753a6be8d52357cf | [
"MIT"
] | 2 | 2016-01-22T21:22:34.000Z | 2016-02-09T06:03:57.000Z | # -*- coding: utf-8 -*-
"""Terrain types and management."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from os.path import dirname, exists, join
from ...core.attributes import Unset
from ...core.entities import Attribute
from ...core.logs import get_logger
from ...core.utils.exceptions import AlreadyExists
from ...core.world import Room
log = get_logger("worldgen")
class TerrainManager:
"""A manager for terrain types."""
def __init__(self):
"""Create a new terrain manager."""
self._terrains = {}
self._point_table = {}
def __contains__(self, code):
return code in self._terrains
def __getitem__(self, code):
return self._terrains[code]
    def register(self, terrain):
        """Register a terrain type by its three-letter code.
:param Terrain terrain: The terrain type to register
:returns None:
:raises AlreadyExists: If a terrain with `code` is already registered
:raises TypeError: If `terrain` is not an instance of Terrain
:raises ValueError: If `code` is not a three letter string
"""
code = terrain.code
if not isinstance(code, str) or len(code) != 3:
            raise ValueError("terrain code must be a 3-letter string")
if code in self._terrains:
raise AlreadyExists(code, self._terrains[code], terrain)
if not isinstance(terrain, Terrain):
            raise TypeError("must be an instance of Terrain to register")
self._terrains[code] = terrain
def set_terrain_for_point(self, point_data, terrain):
"""Link point data to a specific terrain.
Each value in the point data tuple should already be rounded
to their specific ranges.
        :param point_data: A tuple in the form (elevation, moisture, temperature)
:param terrain: The terrain to link this point data to
:returns None:
:raises AlreadyExists: If terrain is already linked to `point_data`
"""
if point_data in self._point_table:
raise AlreadyExists(point_data, self._point_table[point_data],
terrain)
self._point_table[point_data] = terrain
def get_terrain_for_point(self, elevation, moisture, temperature):
"""Get the terrain type for the given point data.
:param float elevation: The elevation value, from -1 to 1
:param float moisture: The moisture value, from -1 to 1
:param float temperature: The temperature value, from -1 to 1
:returns Terrain: The terrain type or None if not found
"""
elevation = round(elevation, 1)
moisture = round(moisture, 1)
temperature = round(temperature, 1)
return self._point_table.get((elevation, moisture, temperature))
TERRAIN = TerrainManager()
class Terrain:
"""A terrain type."""
def __init__(self, code, room_name, symbol, room_description=Unset,
diversity_name=None, diversity_symbol=None,
diversity_minimum=None):
self.code = code
self.room_name = room_name
self.symbol = symbol
self.room_description = room_description
self.diversity_name = diversity_name
self.diversity_symbol = diversity_symbol
self.diversity_minimum = diversity_minimum
def is_diverse(self, diversity_value):
"""Return whether this terrain is diverse at a particular value.
:param float diversity_value: The diversity value to check against
:return bool: Whether the terrain is diverse or not
"""
if self.diversity_minimum is None:
return False
return diversity_value >= self.diversity_minimum
@Room.register_attr("terrain")
class RoomTerrain(Attribute):
"""A room's terrain type."""
@classmethod
def validate(cls, entity, new_value):
if not isinstance(new_value, Terrain):
raise ValueError("Room terrain must be a Terrain instance.")
return new_value
@classmethod
def serialize(cls, entity, value):
return value.code
@classmethod
def deserialize(cls, entity, value):
return TERRAIN[value]
def _parse_terrain_grid():
log.info("Loading terrain point values.")
path = join(dirname(__file__), "terrain_grid.txt")
if not exists(path):
raise IOError("cannot find terrain grid file!")
with open(path) as terrain_grid:
temperature = -1.0
for line in terrain_grid.readlines():
line = line.strip()
if not line:
continue
parts = line.split()
if len(parts) == 1:
temperature = float(parts[0])
elif len(parts) == 22:
elevation = float(parts[0])
moisture = -1.0
for code in parts[1:]:
terrain = TERRAIN[code]
point = (round(elevation, 1),
round(moisture, 1),
round(temperature, 1))
TERRAIN.set_terrain_for_point(point, terrain)
moisture += 0.1
else:
if parts[0] == "xxx":
continue
raise ValueError("malformed terrain grid! {}".format(parts))
TERRAIN.register(Terrain("bea", "Sandy Beach", "^Y."))
TERRAIN.register(Terrain("shw", "Shallow Water", "^C,"))
TERRAIN.register(Terrain("dpw", "Deep Water", "^c,"))
TERRAIN.register(Terrain("sea", "Open Sea", "^B~"))
TERRAIN.register(Terrain("oce", "Open Ocean", "^b~"))
TERRAIN.register(Terrain("mud", "Muddy Banks", "^y."))
TERRAIN.register(Terrain("frs", "Frozen Shore", "^c."))
TERRAIN.register(Terrain("mar", "Marshland", "^c&"))
TERRAIN.register(Terrain("swa", "Swamp", "^G."))
TERRAIN.register(Terrain("aup", "Austere Point", "^KA"))
TERRAIN.register(Terrain("wic", "Windswept Crags", "^w^^"))
TERRAIN.register(Terrain("deh", "Desolate Headlands", "^Kn"))
TERRAIN.register(Terrain("tun", "Bleak Tundra", "^c\""))
TERRAIN.register(Terrain("fri", "Frigid Summit", "^cA"))
TERRAIN.register(Terrain("chc", "Chilled Cliffs", "^c^^"))
TERRAIN.register(Terrain("icd", "Icy Drift", "^c~"))
TERRAIN.register(Terrain("scf", "Snow-covered Fields", "^W\""))
TERRAIN.register(Terrain("glp", "Glacial Peaks", "^CA"))
TERRAIN.register(Terrain("fra", "Frosted Alps", "^C^^"))
TERRAIN.register(Terrain("shi", "Snowy Hillside", "^Wn"))
TERRAIN.register(Terrain("bwo", "Boreal Woods", "^Wt"))
TERRAIN.register(Terrain("arr", "Arid Ridges", "^yA"))
TERRAIN.register(Terrain("dus", "Dusty Mesa", "^y^^"))
TERRAIN.register(Terrain("bsl", "Barren Slopes", "^wn"))
TERRAIN.register(Terrain("dry", "Dry Brush", "^y\""))
TERRAIN.register(Terrain("mop", "Mountain Peak", "^wA"))
TERRAIN.register(Terrain("mou", "Mountain Range", "^K^^"))
TERRAIN.register(Terrain("hil", "Rolling Hills", "^yn"))
TERRAIN.register(Terrain("gra", "Grasslands", "^G\"",
diversity_name="Tall Grass",
diversity_symbol="^g\"",
diversity_minimum=0.3))
TERRAIN.register(Terrain("snm", "Snow-capped Mountains", "^WA"))
TERRAIN.register(Terrain("whi", "Wooded Hills", "^gn"))
TERRAIN.register(Terrain("for", "Sparse Forest", "^Gt",
diversity_name="Dense Forest",
diversity_symbol="^gt",
diversity_minimum=0.3))
TERRAIN.register(Terrain("sun", "Sun-bleached Pinnacle", "^W^^"))
TERRAIN.register(Terrain("par", "Parched Rocks", "^Y%"))
TERRAIN.register(Terrain("dun", "Sand Dunes", "^Ym"))
TERRAIN.register(Terrain("des", "Desert Sands", "^Y~"))
TERRAIN.register(Terrain("sco", "Scorched Rise", "^YA"))
TERRAIN.register(Terrain("tor", "Torrid Bluffs", "^Y^^"))
TERRAIN.register(Terrain("bal", "Balmy Highlands", "^ym"))
TERRAIN.register(Terrain("gla", "Mossy Glade", "^g&"))
TERRAIN.register(Terrain("vol", "Volcanic Crown", "^rA"))
TERRAIN.register(Terrain("jun", "Jungle Mountains", "^g^^"))
TERRAIN.register(Terrain("can", "Canopied Hills", "^Gm"))
TERRAIN.register(Terrain("rai", "Dense Rainforest", "^G%"))
_parse_terrain_grid()
| 36.933628 | 77 | 0.623697 |
from os.path import dirname, exists, join
from ...core.attributes import Unset
from ...core.entities import Attribute
from ...core.logs import get_logger
from ...core.utils.exceptions import AlreadyExists
from ...core.world import Room
log = get_logger("worldgen")
class TerrainManager:
def __init__(self):
self._terrains = {}
self._point_table = {}
def __contains__(self, code):
return code in self._terrains
def __getitem__(self, code):
return self._terrains[code]
def register(self, terrain):
code = terrain.code
if not isinstance(code, str) or len(code) != 3:
            raise ValueError("terrain code must be a 3-letter string")
if code in self._terrains:
raise AlreadyExists(code, self._terrains[code], terrain)
if not isinstance(terrain, Terrain):
            raise TypeError("must be an instance of Terrain to register")
self._terrains[code] = terrain
def set_terrain_for_point(self, point_data, terrain):
if point_data in self._point_table:
raise AlreadyExists(point_data, self._point_table[point_data],
terrain)
self._point_table[point_data] = terrain
def get_terrain_for_point(self, elevation, moisture, temperature):
elevation = round(elevation, 1)
moisture = round(moisture, 1)
temperature = round(temperature, 1)
return self._point_table.get((elevation, moisture, temperature))
TERRAIN = TerrainManager()
class Terrain:
def __init__(self, code, room_name, symbol, room_description=Unset,
diversity_name=None, diversity_symbol=None,
diversity_minimum=None):
self.code = code
self.room_name = room_name
self.symbol = symbol
self.room_description = room_description
self.diversity_name = diversity_name
self.diversity_symbol = diversity_symbol
self.diversity_minimum = diversity_minimum
def is_diverse(self, diversity_value):
if self.diversity_minimum is None:
return False
return diversity_value >= self.diversity_minimum
@Room.register_attr("terrain")
class RoomTerrain(Attribute):
@classmethod
def validate(cls, entity, new_value):
if not isinstance(new_value, Terrain):
raise ValueError("Room terrain must be a Terrain instance.")
return new_value
@classmethod
def serialize(cls, entity, value):
return value.code
@classmethod
def deserialize(cls, entity, value):
return TERRAIN[value]
def _parse_terrain_grid():
log.info("Loading terrain point values.")
path = join(dirname(__file__), "terrain_grid.txt")
if not exists(path):
raise IOError("cannot find terrain grid file!")
with open(path) as terrain_grid:
temperature = -1.0
for line in terrain_grid.readlines():
line = line.strip()
if not line:
continue
parts = line.split()
if len(parts) == 1:
temperature = float(parts[0])
elif len(parts) == 22:
elevation = float(parts[0])
moisture = -1.0
for code in parts[1:]:
terrain = TERRAIN[code]
point = (round(elevation, 1),
round(moisture, 1),
round(temperature, 1))
TERRAIN.set_terrain_for_point(point, terrain)
moisture += 0.1
else:
if parts[0] == "xxx":
continue
raise ValueError("malformed terrain grid! {}".format(parts))
TERRAIN.register(Terrain("bea", "Sandy Beach", "^Y."))
TERRAIN.register(Terrain("shw", "Shallow Water", "^C,"))
TERRAIN.register(Terrain("dpw", "Deep Water", "^c,"))
TERRAIN.register(Terrain("sea", "Open Sea", "^B~"))
TERRAIN.register(Terrain("oce", "Open Ocean", "^b~"))
TERRAIN.register(Terrain("mud", "Muddy Banks", "^y."))
TERRAIN.register(Terrain("frs", "Frozen Shore", "^c."))
TERRAIN.register(Terrain("mar", "Marshland", "^c&"))
TERRAIN.register(Terrain("swa", "Swamp", "^G."))
TERRAIN.register(Terrain("aup", "Austere Point", "^KA"))
TERRAIN.register(Terrain("wic", "Windswept Crags", "^w^^"))
TERRAIN.register(Terrain("deh", "Desolate Headlands", "^Kn"))
TERRAIN.register(Terrain("tun", "Bleak Tundra", "^c\""))
TERRAIN.register(Terrain("fri", "Frigid Summit", "^cA"))
TERRAIN.register(Terrain("chc", "Chilled Cliffs", "^c^^"))
TERRAIN.register(Terrain("icd", "Icy Drift", "^c~"))
TERRAIN.register(Terrain("scf", "Snow-covered Fields", "^W\""))
TERRAIN.register(Terrain("glp", "Glacial Peaks", "^CA"))
TERRAIN.register(Terrain("fra", "Frosted Alps", "^C^^"))
TERRAIN.register(Terrain("shi", "Snowy Hillside", "^Wn"))
TERRAIN.register(Terrain("bwo", "Boreal Woods", "^Wt"))
TERRAIN.register(Terrain("arr", "Arid Ridges", "^yA"))
TERRAIN.register(Terrain("dus", "Dusty Mesa", "^y^^"))
TERRAIN.register(Terrain("bsl", "Barren Slopes", "^wn"))
TERRAIN.register(Terrain("dry", "Dry Brush", "^y\""))
TERRAIN.register(Terrain("mop", "Mountain Peak", "^wA"))
TERRAIN.register(Terrain("mou", "Mountain Range", "^K^^"))
TERRAIN.register(Terrain("hil", "Rolling Hills", "^yn"))
TERRAIN.register(Terrain("gra", "Grasslands", "^G\"",
diversity_name="Tall Grass",
diversity_symbol="^g\"",
diversity_minimum=0.3))
TERRAIN.register(Terrain("snm", "Snow-capped Mountains", "^WA"))
TERRAIN.register(Terrain("whi", "Wooded Hills", "^gn"))
TERRAIN.register(Terrain("for", "Sparse Forest", "^Gt",
diversity_name="Dense Forest",
diversity_symbol="^gt",
diversity_minimum=0.3))
TERRAIN.register(Terrain("sun", "Sun-bleached Pinnacle", "^W^^"))
TERRAIN.register(Terrain("par", "Parched Rocks", "^Y%"))
TERRAIN.register(Terrain("dun", "Sand Dunes", "^Ym"))
TERRAIN.register(Terrain("des", "Desert Sands", "^Y~"))
TERRAIN.register(Terrain("sco", "Scorched Rise", "^YA"))
TERRAIN.register(Terrain("tor", "Torrid Bluffs", "^Y^^"))
TERRAIN.register(Terrain("bal", "Balmy Highlands", "^ym"))
TERRAIN.register(Terrain("gla", "Mossy Glade", "^g&"))
TERRAIN.register(Terrain("vol", "Volcanic Crown", "^rA"))
TERRAIN.register(Terrain("jun", "Jungle Mountains", "^g^^"))
TERRAIN.register(Terrain("can", "Canopied Hills", "^Gm"))
TERRAIN.register(Terrain("rai", "Dense Rainforest", "^G%"))
_parse_terrain_grid()
| true | true |
1c4aeb13d345d58019753e98b9a30dd9ea37b2fd | 3,440 | py | Python | src/wallabag/wallabag_show.py | davidhelbig/wallabag-cli | 0324138593c836e7371450262c14f207445bb921 | [
"MIT"
] | 3 | 2020-04-22T05:08:49.000Z | 2020-07-30T18:43:10.000Z | src/wallabag/wallabag_show.py | davidhelbig/wallabag-cli | 0324138593c836e7371450262c14f207445bb921 | [
"MIT"
] | null | null | null | src/wallabag/wallabag_show.py | davidhelbig/wallabag-cli | 0324138593c836e7371450262c14f207445bb921 | [
"MIT"
] | 3 | 2020-04-23T18:30:04.000Z | 2020-10-14T14:57:59.000Z | """
Show a wallabag entry
"""
import io
import formatter
import json
import os
from sys import exit
import sys
from bs4 import BeautifulSoup
from . import api
from . import conf
from . import entry
def show(entry_id, colors=True, raw=False, html=False):
"""
Main function for showing an entry.
"""
conf.load()
try:
request = api.api_get_entry(entry_id)
__handle_request_error(request)
entr = entry.Entry(json.loads(request.response))
except api.OAuthException as ex:
print("Error: {0}".format(ex.text))
print()
exit(-1)
title = entr.title
try:
delimiter = "".ljust(os.get_terminal_size().columns, '=')
# piped output to file or other process
except OSError:
delimiter = "\n"
article = entr.content
if not html:
article = html2text(article, colors)
output = "{0}\n{1}\n{2}".format(title, delimiter, article)
if not raw:
output = __format_text(output)
print(output)
def html2text(html, colors=True):
soup = BeautifulSoup(html, "html.parser")
# Color h1-h3
if colors:
h1colors = '\033[93m'
h1colore = '\033[0m'
else:
h1colors = h1colore = ""
for h1 in soup.findAll('h1'):
h1.string = "\n{0}{1}{2}".format(h1colors, h1.string, h1colore)
for h2 in soup.findAll('h2'):
h2.string = "\n{0}{1}{2}".format(h1colors, h2.string, h1colore)
for h3 in soup.findAll('h3'):
h3.string = "\n{0}{1}{2}".format(h1colors, h3.string, h1colore)
if colors:
# Color bold texts
bcolors = '\033[92m'
bcolore = '\033[0m'
for bold in soup.findAll('b'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
for bold in soup.findAll('strong'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
# Replace hr with visual lines
try:
hrstring = "".ljust(os.get_terminal_size().columns, '-')
# piped output to file or other process
except OSError:
hrstring = "-----"
for hr in soup.findAll('hr'):
replace = soup.new_tag('p')
replace.string = hrstring
hr.insert_after(replace)
hr.unwrap()
# Replace images by information-texts
for img in soup.findAll('img'):
replace = soup.new_tag('p')
try:
alt = " \"{0}\"".format(img['alt'])
except KeyError:
alt = ""
replace.string = "[IMAGE{0}]\n".format(alt)
img.insert_after(replace)
img.unwrap()
return soup.text
def __format_text(text):
try:
maxcol = os.get_terminal_size().columns
# piped output to file or other process
except OSError:
maxcol = sys.maxsize
ret = ""
for line in text.splitlines():
ios = io.StringIO()
writer = formatter.DumbWriter(ios, maxcol=maxcol)
writer.send_flowing_data(line)
ret = "{0}{1}\n".format(ret, ios.getvalue())
ios.close()
return ret
def __handle_request_error(request):
if request.has_error():
if request.error == api.Error.http_forbidden or request.error == api.Error.http_not_found:
print("Error: Invalid entry id.")
print()
exit(-1)
print("Error: {0} - {1}".format(request.error_text,
request.error_description))
exit(-1)
| 26.461538 | 98 | 0.581977 | import io
import formatter
import json
import os
from sys import exit
import sys
from bs4 import BeautifulSoup
from . import api
from . import conf
from . import entry
def show(entry_id, colors=True, raw=False, html=False):
conf.load()
try:
request = api.api_get_entry(entry_id)
__handle_request_error(request)
entr = entry.Entry(json.loads(request.response))
except api.OAuthException as ex:
print("Error: {0}".format(ex.text))
print()
exit(-1)
title = entr.title
try:
delimiter = "".ljust(os.get_terminal_size().columns, '=')
except OSError:
delimiter = "\n"
article = entr.content
if not html:
article = html2text(article, colors)
output = "{0}\n{1}\n{2}".format(title, delimiter, article)
if not raw:
output = __format_text(output)
print(output)
def html2text(html, colors=True):
soup = BeautifulSoup(html, "html.parser")
if colors:
h1colors = '\033[93m'
h1colore = '\033[0m'
else:
h1colors = h1colore = ""
for h1 in soup.findAll('h1'):
h1.string = "\n{0}{1}{2}".format(h1colors, h1.string, h1colore)
for h2 in soup.findAll('h2'):
h2.string = "\n{0}{1}{2}".format(h1colors, h2.string, h1colore)
for h3 in soup.findAll('h3'):
h3.string = "\n{0}{1}{2}".format(h1colors, h3.string, h1colore)
if colors:
bcolors = '\033[92m'
bcolore = '\033[0m'
for bold in soup.findAll('b'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
for bold in soup.findAll('strong'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
try:
hrstring = "".ljust(os.get_terminal_size().columns, '-')
except OSError:
hrstring = "-----"
for hr in soup.findAll('hr'):
replace = soup.new_tag('p')
replace.string = hrstring
hr.insert_after(replace)
hr.unwrap()
for img in soup.findAll('img'):
replace = soup.new_tag('p')
try:
alt = " \"{0}\"".format(img['alt'])
except KeyError:
alt = ""
replace.string = "[IMAGE{0}]\n".format(alt)
img.insert_after(replace)
img.unwrap()
return soup.text
def __format_text(text):
try:
maxcol = os.get_terminal_size().columns
except OSError:
maxcol = sys.maxsize
ret = ""
for line in text.splitlines():
ios = io.StringIO()
writer = formatter.DumbWriter(ios, maxcol=maxcol)
writer.send_flowing_data(line)
ret = "{0}{1}\n".format(ret, ios.getvalue())
ios.close()
return ret
def __handle_request_error(request):
if request.has_error():
if request.error == api.Error.http_forbidden or request.error == api.Error.http_not_found:
print("Error: Invalid entry id.")
print()
exit(-1)
print("Error: {0} - {1}".format(request.error_text,
request.error_description))
exit(-1)
| true | true |
1c4aec00bc23d2f212b4cd3654c3fd79517543b8 | 1,672 | py | Python | tests/cpu/test_layer_norm.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | 322 | 2020-05-08T04:03:51.000Z | 2022-03-30T13:01:31.000Z | tests/cpu/test_layer_norm.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | 159 | 2020-05-09T02:55:40.000Z | 2022-03-30T13:43:04.000Z | tests/cpu/test_layer_norm.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | 64 | 2020-05-08T03:49:27.000Z | 2022-03-22T09:50:23.000Z | import unittest
import torch
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
class M1(torch.nn.Module):
def __init__(self):
super(M1, self).__init__()
self.conv = torch.nn.Conv2d(5, 5, 1, stride=1, bias=False)
self.layer_norm = torch.nn.LayerNorm(10)
def forward(self, x):
x = self.conv(x)
x = self.layer_norm(x)
return x
class M2(torch.nn.Module):
def __init__(self):
super(M2, self).__init__()
self.layer_norm = torch.nn.LayerNorm(10)
def forward(self, x):
x = self.layer_norm(x)
return x
class LayerNormTester(TestCase):
def test_layer_norm(self):
# autocast inference path. layer_norm is fallthrough.
with torch.cpu.amp.autocast(), torch.no_grad():
x = torch.randn(20, 5, 10, 10)
# layernorm input is bfloat16
model = M1().eval()
trace_model = torch.jit.trace(model, x)
y1_bf16 = model(x)
y2_bf16 = trace_model(x)
self.assertEqual(y1_bf16.dtype, torch.bfloat16)
self.assertEqual(y2_bf16.dtype, torch.bfloat16)
self.assertEqual(y1_bf16, y2_bf16)
# layernorm input is fp32
model = M2().eval()
trace_model = torch.jit.trace(model, x)
y1_fp32 = model(x)
y2_fp32 = trace_model(x)
self.assertEqual(y1_fp32.dtype, torch.float32)
self.assertEqual(y2_fp32.dtype, torch.float32)
self.assertEqual(y1_fp32, y2_fp32)
if __name__ == '__main__':
test = unittest.main()
| 31.54717 | 67 | 0.585526 |
| true | true |
1c4aec3a579fd4b1fab905c16e68050e23f9625c | 1,087 | py | Python | route/user_count_edit.py | k0000k/openNAMU | b5862a7e5a1f1a2a6bee5eec5b3d9784528f42e8 | [
"BSD-3-Clause"
] | 126 | 2019-01-16T01:02:34.000Z | 2021-07-04T14:16:40.000Z | route/user_count_edit.py | k0000k/openNAMU | b5862a7e5a1f1a2a6bee5eec5b3d9784528f42e8 | [
"BSD-3-Clause"
] | 454 | 2018-12-02T10:03:37.000Z | 2021-07-13T11:31:42.000Z | route/user_count_edit.py | k0000k/openNAMU | b5862a7e5a1f1a2a6bee5eec5b3d9784528f42e8 | [
"BSD-3-Clause"
] | 102 | 2018-12-02T11:49:54.000Z | 2021-06-30T10:52:53.000Z | from .tool.func import *
def user_count_edit_2(conn, name):
curs = conn.cursor()
if name == None:
that = ip_check()
else:
that = name
curs.execute(db_change("select count(*) from history where ip = ?"), [that])
count = curs.fetchall()
if count:
data = count[0][0]
else:
data = 0
curs.execute(db_change("select count(*) from topic where ip = ?"), [that])
count = curs.fetchall()
if count:
t_data = count[0][0]
else:
t_data = 0
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('count'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<ul class="inside_ul">
<li><a href="/record/''' + url_pas(that) + '''">''' + load_lang('edit_record') + '''</a> : ''' + str(data) + '''</li>
<li><a href="/record/topic/''' + url_pas(that) + '''">''' + load_lang('discussion_record') + '''</a> : ''' + str(t_data) + '''</a></li>
</ul>
''',
menu = [['user', load_lang('return')]]
    )) | 31.970588 | 151 | 0.50598 |
| true | true |
1c4aecf3ee7f936904e6cbea697a7afb16be293b | 12,649 | py | Python | rebuild_test.py | thobbs/cassandra-dtest | 71c0c31258577033a591abad4bf6fdcfdc9f723b | [
"Apache-2.0"
] | null | null | null | rebuild_test.py | thobbs/cassandra-dtest | 71c0c31258577033a591abad4bf6fdcfdc9f723b | [
"Apache-2.0"
] | null | null | null | rebuild_test.py | thobbs/cassandra-dtest | 71c0c31258577033a591abad4bf6fdcfdc9f723b | [
"Apache-2.0"
] | null | null | null | import time
from threading import Thread
from cassandra import ConsistencyLevel
from ccmlib.node import ToolError
from dtest import Tester, debug
from tools import insert_c1c2, query_c1c2, since
class TestRebuild(Tester):
def __init__(self, *args, **kwargs):
kwargs['cluster_options'] = {'start_rpc': 'true'}
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs when trying to send the migration to a
# node that hasn't started yet, and when it does, it gets
# replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
# ignore streaming error during bootstrap
r'Exception encountered during startup',
r'Streaming error occurred'
]
Tester.__init__(self, *args, **kwargs)
def simple_rebuild_test(self):
"""
@jira_ticket CASSANDRA-9119
Test rebuild from other dc works as expected.
"""
keys = 1000
cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
cluster.add(node1, True, data_center='dc1')
# start node in dc1
node1.start(wait_for_binary_proto=True)
# populate data in dc1
session = self.patient_exclusive_cql_connection(node1)
self.create_ks(session, 'ks', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.LOCAL_ONE)
# check data
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
session.shutdown()
# Bootstrapping a new node in dc2 with auto_bootstrap: false
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))
cluster.add(node2, False, data_center='dc2')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
# wait for snitch to reload
time.sleep(60)
# alter keyspace to replicate to dc2
session = self.patient_exclusive_cql_connection(node2)
session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
# alter system_auth -- rebuilding it no longer possible after
# CASSANDRA-11848 prevented local node from being considered a source
session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute('USE ks')
self.rebuild_errors = 0
# rebuild dc2 from dc1
def rebuild():
try:
node2.nodetool('rebuild dc1')
except ToolError as e:
if 'Node is still rebuilding' in e.stdout:
self.rebuild_errors += 1
else:
raise e
class Runner(Thread):
def __init__(self, func):
Thread.__init__(self)
self.func = func
self.thread_exc_info = None
def run(self):
"""
Closes over self to catch any exceptions raised by func and
register them at self.thread_exc_info
Based on http://stackoverflow.com/a/1854263
"""
try:
self.func()
except Exception:
import sys
self.thread_exc_info = sys.exc_info()
cmd1 = Runner(rebuild)
cmd1.start()
# concurrent rebuild should not be allowed (CASSANDRA-9119)
# (following sleep is needed to avoid conflict in 'nodetool()' method setting up env.)
time.sleep(.1)
        # we don't need to manually raise exceptions here -- already handled
rebuild()
cmd1.join()
# manually raise exception from cmd1 thread
# see http://stackoverflow.com/a/1854263
if cmd1.thread_exc_info is not None:
raise cmd1.thread_exc_info[1], None, cmd1.thread_exc_info[2]
# exactly 1 of the two nodetool calls should fail
# usually it will be the one in the main thread,
# but occasionally it wins the race with the one in the secondary thread,
# so we check that one succeeded and the other failed
self.assertEqual(self.rebuild_errors, 1,
msg='rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors))
# check data
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
@since('2.2')
def resumable_rebuild_test(self):
"""
@jira_ticket CASSANDRA-10810
Test rebuild operation is resumable
"""
self.ignore_log_patterns = self.ignore_log_patterns[:] + [r'Error while rebuilding node',
r'Streaming error occurred on session with peer 127.0.0.3',
r'Remote peer 127.0.0.3 failed stream session']
cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
# Create 2 nodes on dc1
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))
cluster.add(node1, True, data_center='dc1')
cluster.add(node2, True, data_center='dc1')
node1.start(wait_for_binary_proto=True)
node2.start(wait_for_binary_proto=True)
# Insert data into node1 and node2
session = self.patient_exclusive_cql_connection(node1)
self.create_ks(session, 'ks', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
key = list(range(10000, 20000))
session = self.patient_exclusive_cql_connection(node2)
session.execute('USE ks')
insert_c1c2(session, keys=key, consistency=ConsistencyLevel.ALL)
session.shutdown()
# Create a new node3 on dc2
node3 = cluster.create_node('node3', False,
('127.0.0.3', 9160),
('127.0.0.3', 7000),
'7300', '2002', None,
binary_interface=('127.0.0.3', 9042),
byteman_port='8300')
cluster.add(node3, False, data_center='dc2')
node3.start(wait_other_notice=False, wait_for_binary_proto=True)
# Wait for snitch to be refreshed
time.sleep(5)
# Alter necessary keyspace for rebuild operation
session = self.patient_exclusive_cql_connection(node3)
session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
# Path to byteman script which makes node2 throw an exception making rebuild fail
script = ['./rebuild_failure_inject.btm']
node3.byteman_submit(script)
# First rebuild must fail and data must be incomplete
with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'):
debug('Executing first rebuild -> '),
node3.nodetool('rebuild dc1')
debug('Expected: FAILED')
session.execute('USE ks')
with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'):
debug('Checking data is complete -> '),
for i in xrange(0, 20000):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
debug('Expected: INCOMPLETE')
debug('Executing second rebuild -> '),
node3.nodetool('rebuild dc1')
debug('Expected: SUCCEED')
# Check all streaming sessions completed, streamed ranges are skipped and verify streamed data
node3.watch_log_for('All sessions completed')
node3.watch_log_for('Skipping streaming those ranges.')
debug('Checking data is complete -> '),
for i in xrange(0, 20000):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
debug('Expected: COMPLETE')
@since('3.6')
def rebuild_ranges_test(self):
"""
@jira_ticket CASSANDRA-10406
"""
keys = 1000
cluster = self.cluster
tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.set_configuration_options(values={'num_tokens': 1})
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', tokens[0],
binary_interface=('127.0.0.1', 9042))
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.add(node1, True, data_center='dc1')
node1 = cluster.nodelist()[0]
# start node in dc1
node1.start(wait_for_binary_proto=True)
# populate data in dc1
session = self.patient_exclusive_cql_connection(node1)
# ks1 will be rebuilt in node2
self.create_ks(session, 'ks1', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
# ks2 will not be rebuilt in node2
self.create_ks(session, 'ks2', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
session.shutdown()
        # Bootstrapping a new node in dc2 with auto_bootstrap: false
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', tokens[1],
binary_interface=('127.0.0.2', 9042))
node2.set_configuration_options(values={'initial_token': tokens[1]})
cluster.add(node2, False, data_center='dc2')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
# wait for snitch to reload
time.sleep(60)
# alter keyspace to replicate to dc2
session = self.patient_exclusive_cql_connection(node2)
session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute('USE ks1')
# rebuild only ks1 with range that is node1's replica
node2.nodetool('rebuild -ks ks1 -ts (%s,%s] dc1' % (tokens[1], str(pow(2, 63) - 1)))
# check data is sent by stopping node1
node1.stop()
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.ONE)
# ks2 should not be streamed
session.execute('USE ks2')
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
| 43.617241 | 190 | 0.570875 |
| false | true |
1c4aee0bd841408284417e7a4b3eeb4380477e57 | 5,814 | py | Python | eval.py | Luoyadan/BGNN-AAAI | 16bd260b93009be27932415e74ce1b3128215d92 | [
"MIT"
] | 52 | 2019-12-11T08:25:11.000Z | 2021-12-26T09:32:56.000Z | eval.py | UQMM/BGNN-AAAI | 16bd260b93009be27932415e74ce1b3128215d92 | [
"MIT"
] | 3 | 2020-02-17T03:37:17.000Z | 2021-06-09T09:22:10.000Z | eval.py | UQMM/BGNN-AAAI | 16bd260b93009be27932415e74ce1b3128215d92 | [
"MIT"
] | 16 | 2020-01-15T06:42:30.000Z | 2021-08-30T05:59:43.000Z | import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
from torchtools import *
from data import MiniImagenetLoader, TieredImagenetLoader
from model import EmbeddingImagenet, GraphNetwork, ConvNet
import shutil
import os
import random
from train import ModelTrainer
if __name__ == '__main__':
tt.arg.test_model = 'D-tiered_edge_N-5_K-1_U-0_L-3_B-64_C-8_T-True_SEED-222' if tt.arg.test_model is None else tt.arg.test_model
list1 = tt.arg.test_model.split("_")
param = {}
tt.arg.arch = None
for i in range(len(list1)):
if 'att' in list1[i]:
tt.arg.arch = 'att'
continue
elif 'node' in list1[i] or 'loss' in list1[i] :
tt.arg.arch = 'node_loss'
continue
elif 'edge' in list1[i]:
tt.arg.arch = 'edge'
continue
param[list1[i].split("-", 1)[0]] = list1[i].split("-", 1)[1]
tt.arg.dataset = param['D']
tt.arg.num_ways = int(param['N'])
tt.arg.num_shots = int(param['K'])
tt.arg.num_unlabeled = int(param['U'])
tt.arg.num_layers = int(param['L'])
tt.arg.meta_batch_size = int(param['B'])
tt.arg.transductive = False if 'False' in param['T'] else True
tt.arg.num_cell = 8
####################
tt.arg.device = 'cuda:0' if tt.arg.device is None else tt.arg.device
# replace dataset_root with your own
tt.arg.dataset_root = '/media/bigdata/uqyluo/egnn_dataset'
tt.arg.dataset = 'mini' if tt.arg.dataset is None else tt.arg.dataset
tt.arg.num_ways = 5 if tt.arg.num_ways is None else tt.arg.num_ways
tt.arg.num_shots = 1 if tt.arg.num_shots is None else tt.arg.num_shots
tt.arg.num_unlabeled = 0 if tt.arg.num_unlabeled is None else tt.arg.num_unlabeled
tt.arg.num_layers = 3 if tt.arg.num_layers is None else tt.arg.num_layers
tt.arg.meta_batch_size = 40 if tt.arg.meta_batch_size is None else tt.arg.meta_batch_size
tt.arg.transductive = False if tt.arg.transductive is None else tt.arg.transductive
tt.arg.seed = 222 if tt.arg.seed is None else tt.arg.seed
tt.arg.num_gpus = 2 if tt.arg.num_gpus is None else tt.arg.num_gpus
tt.arg.num_ways_train = tt.arg.num_ways
tt.arg.num_ways_test = tt.arg.num_ways
tt.arg.num_shots_train = tt.arg.num_shots
tt.arg.num_shots_test = tt.arg.num_shots
tt.arg.train_transductive = tt.arg.transductive
tt.arg.test_transductive = tt.arg.transductive
# model parameter related
tt.arg.num_edge_features = 96
tt.arg.num_node_features = 96
tt.arg.emb_size = 128
# train, test parameters
tt.arg.train_iteration = 100000 if tt.arg.dataset == 'mini' else 200000
tt.arg.test_iteration = 10000
tt.arg.test_interval = 5000
tt.arg.test_batch_size = 32
tt.arg.log_step = 1000
tt.arg.lr = 1e-3
tt.arg.grad_clip = 5
tt.arg.weight_decay = 1e-6
tt.arg.dec_lr = 15000 if tt.arg.dataset == 'mini' else 30000
tt.arg.dropout = 0.1 if tt.arg.dataset == 'mini' else 0.0
#set random seed
np.random.seed(tt.arg.seed)
torch.manual_seed(tt.arg.seed)
torch.cuda.manual_seed_all(tt.arg.seed)
random.seed(tt.arg.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
enc_module = EmbeddingImagenet(emb_size=tt.arg.emb_size).cuda()
# set random seed
np.random.seed(tt.arg.seed)
torch.manual_seed(tt.arg.seed)
torch.cuda.manual_seed_all(tt.arg.seed)
random.seed(tt.arg.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# to check
exp_name = 'D-{}'.format(tt.arg.dataset)
if tt.arg.arch is not None:
exp_name += '_{}'.format(tt.arg.arch)
exp_name += '_N-{}_K-{}_U-{}'.format(tt.arg.num_ways, tt.arg.num_shots, tt.arg.num_unlabeled)
exp_name += '_L-{}_B-{}'.format(tt.arg.num_layers, tt.arg.meta_batch_size)
exp_name += '_C-{}'.format(tt.arg.num_cell)
exp_name += '_T-{}_SEED-222'.format(tt.arg.transductive)
if not exp_name == tt.arg.test_model:
print(exp_name)
print(tt.arg.test_model)
print('Test model and input arguments are mismatched!')
AssertionError()
gnn_module = GraphNetwork(in_features=tt.arg.emb_size,
node_features=tt.arg.num_edge_features,
edge_features=tt.arg.num_node_features,
num_layers=tt.arg.num_layers,
num_cell=tt.arg.num_cell,
dropout=tt.arg.dropout, arch=tt.arg.arch).cuda()
if tt.arg.dataset == 'mini':
test_loader = MiniImagenetLoader(root=tt.arg.dataset_root, partition='test')
elif tt.arg.dataset == 'tiered':
test_loader = TieredImagenetLoader(root=tt.arg.dataset_root, partition='test')
else:
print('Unknown dataset!')
data_loader = {'test': test_loader}
# create trainer
tester = ModelTrainer(enc_module=enc_module,
gnn_module=gnn_module,
data_loader=data_loader)
checkpoint = torch.load('asset/checkpoints/{}/'.format(exp_name) + 'model_best.pth.tar')
# checkpoint = torch.load('./trained_models/{}/'.format(exp_name) + 'model_best.pth.tar')
enc_module = nn.DataParallel(enc_module)
gnn_module = nn.DataParallel(gnn_module)
tester.enc_module.load_state_dict(checkpoint['enc_module_state_dict'])
print("load pre-trained enc_nn done!")
# initialize gnn pre-trained
tester.gnn_module.load_state_dict(checkpoint['gnn_module_state_dict'])
print("load pre-trained egnn done!")
tester.val_acc = checkpoint['val_acc']
tester.global_step = checkpoint['iteration']
print(tester.global_step)
tester.eval(partition='test')
| 35.668712 | 132 | 0.659615 |
| true | true |
1c4aee7999da36da005471a52b1815e649767373 | 66,724 | py | Python | feapder/utils/tools.py | gyco/feapder | 6d7f6f318b3dd93168cbd76d9ba165b04285a05e | [
"MIT"
] | null | null | null | feapder/utils/tools.py | gyco/feapder | 6d7f6f318b3dd93168cbd76d9ba165b04285a05e | [
"MIT"
] | null | null | null | feapder/utils/tools.py | gyco/feapder | 6d7f6f318b3dd93168cbd76d9ba165b04285a05e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2018-09-06 14:21
---------
@summary: utility helpers
---------
@author: Boris
@email: [email protected]
"""
import asyncio
import base64
import calendar
import codecs
import configparser  # for reading config files
import datetime
import functools
import hashlib
import html
import json
import os
import pickle
import random
import re
import signal
import socket
import ssl
import string
import sys
import time
import traceback
import urllib
import urllib.parse
import uuid
import weakref
from functools import partial, wraps
from hashlib import md5
from pprint import pformat
from pprint import pprint
from urllib import request
from urllib.parse import urljoin
import execjs # pip install PyExecJS
import redis
import requests
import six
from requests.cookies import RequestsCookieJar
from w3lib.url import canonicalize_url as _canonicalize_url
import feapder.setting as setting
from feapder.db.redisdb import RedisDB
from feapder.utils.email_sender import EmailSender
from feapder.utils.log import log
os.environ["EXECJS_RUNTIME"] = "Node"  # use Node as the JS runtime
# disable ssl certificate verification globally
ssl._create_default_https_context = ssl._create_unverified_context
TIME_OUT = 30
TIMER_TIME = 5
redisdb = None
def get_redisdb():
global redisdb
if not redisdb:
redisdb = RedisDB()
return redisdb
# decorators
class Singleton(object):
def __init__(self, cls):
self._cls = cls
self._instance = {}
def __call__(self, *args, **kwargs):
if self._cls not in self._instance:
self._instance[self._cls] = self._cls(*args, **kwargs)
return self._instance[self._cls]
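# Hedged usage sketch for the Singleton decorator above (added for illustration,
# not part of the original feapder source; the _Cfg class is a made-up example).
def _singleton_example():
    @Singleton
    class _Cfg:
        def __init__(self):
            self.created_at = time.time()

    # the decorator caches the first instance and hands it back on every later call
    assert _Cfg() is _Cfg()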
def log_function_time(func):
try:
        @functools.wraps(func)  # keep the wrapped function's original attributes
def calculate_time(*args, **kw):
began_time = time.time()
callfunc = func(*args, **kw)
end_time = time.time()
log.debug(func.__name__ + " run time = " + str(end_time - began_time))
return callfunc
return calculate_time
except:
log.debug("求取时间无效 因为函数参数不符")
return func
def run_safe_model(module_name):
def inner_run_safe_model(func):
try:
            @functools.wraps(func)  # keep the wrapped function's original attributes
def run_func(*args, **kw):
callfunc = None
try:
callfunc = func(*args, **kw)
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return callfunc
return run_func
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return func
return inner_run_safe_model
def memoizemethod_noargs(method):
"""Decorator to cache the result of a method (without arguments) using a
weak reference to its object
"""
cache = weakref.WeakKeyDictionary()
@functools.wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
def retry(retry_times=3, interval=0):
"""
    Retry decorator for plain (synchronous) functions
    Args:
        retry_times: how many times to retry
        interval: seconds to wait between retries
Returns:
"""
def _retry(func):
        @functools.wraps(func)  # keep the wrapped function's original attributes
def wapper(*args, **kwargs):
for i in range(retry_times):
try:
return func(*args, **kwargs)
except Exception as e:
log.error(
"函数 {} 执行失败 重试 {} 次. error {}".format(func.__name__, i + 1, e)
)
time.sleep(interval)
if i + 1 >= retry_times:
raise e
return wapper
return _retry
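# Hedged usage sketch for the retry decorator (illustrative only; the flaky
# function below and its failure pattern are assumptions, not feapder code).
def _retry_example():
    attempts = {"n": 0}

    @retry(retry_times=3, interval=0)
    def flaky():
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise ValueError("transient error")
        return "ok"

    # the first two calls fail and are retried, the third succeeds
    assert flaky() == "ok"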
def retry_asyncio(retry_times=3, interval=0):
"""
    Retry decorator for coroutines
    Args:
        retry_times: how many times to retry
        interval: seconds to wait between retries
Returns:
"""
def _retry(func):
        @functools.wraps(func)  # keep the wrapped function's original attributes
async def wapper(*args, **kwargs):
for i in range(retry_times):
try:
return await func(*args, **kwargs)
except Exception as e:
log.error(
"函数 {} 执行失败 重试 {} 次. error {}".format(func.__name__, i + 1, e)
)
await asyncio.sleep(interval)
if i + 1 >= retry_times:
raise e
return wapper
return _retry
def func_timeout(timeout):
"""
    Decorator that limits how long a function may run
    Note: not supported on Windows (relies on SIGALRM)
    Args:
        timeout: time limit in seconds
Eg:
@set_timeout(3)
def test():
...
Returns:
"""
def wapper(func):
def handle(
signum, frame
        ):  # callback run when SIGALRM arrives; first arg is the signal number, second is the interrupted stack frame
raise TimeoutError
def new_method(*args, **kwargs):
            signal.signal(signal.SIGALRM, handle)  # register the signal handler
            signal.alarm(timeout)  # arm an alarm that fires after `timeout` seconds
            r = func(*args, **kwargs)
            signal.alarm(0)  # cancel the alarm
return r
return new_method
return wapper
######################## web page parsing helpers ###############################
# @log_function_time
def get_html_by_requests(
url, headers=None, code="utf-8", data=None, proxies={}, with_response=False
):
html = ""
r = None
try:
if data:
r = requests.post(
url, headers=headers, timeout=TIME_OUT, data=data, proxies=proxies
)
else:
r = requests.get(url, headers=headers, timeout=TIME_OUT, proxies=proxies)
if code:
r.encoding = code
html = r.text
except Exception as e:
log.error(e)
finally:
r and r.close()
if with_response:
return html, r
else:
return html
def get_json_by_requests(
url,
params=None,
headers=None,
data=None,
proxies={},
with_response=False,
cookies=None,
):
json = {}
response = None
try:
# response = requests.get(url, params = params)
if data:
response = requests.post(
url,
headers=headers,
data=data,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
else:
response = requests.get(
url,
headers=headers,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
response.encoding = "utf-8"
json = response.json()
except Exception as e:
log.error(e)
finally:
response and response.close()
if with_response:
return json, response
else:
return json
def get_cookies(response):
cookies = requests.utils.dict_from_cookiejar(response.cookies)
return cookies
def get_cookies_from_str(cookie_str):
"""
>>> get_cookies_from_str("key=value; key2=value2; key3=; key4=; ")
{'key': 'value', 'key2': 'value2', 'key3': '', 'key4': ''}
Args:
cookie_str: key=value; key2=value2; key3=; key4=
Returns:
"""
cookies = {}
for cookie in cookie_str.split(";"):
cookie = cookie.strip()
if not cookie:
continue
key, value = cookie.split("=", 1)
key = key.strip()
value = value.strip()
cookies[key] = value
return cookies
def get_cookies_jar(cookies):
"""
    @summary: convert cookies generated by selenium into a cookie jar usable by requests
        requests.get(xxx, cookies=jar)
        reference: https://www.cnblogs.com/small-bud/p/9064674.html
---------
@param cookies: [{},{}]
---------
@result: cookie jar
"""
cookie_jar = RequestsCookieJar()
for cookie in cookies:
cookie_jar.set(cookie["name"], cookie["value"])
return cookie_jar
def get_cookies_from_selenium_cookie(cookies):
"""
    @summary: convert cookies generated by selenium into a plain dict usable by requests
        requests.get(xxx, cookies=cookie_dict)
        reference: https://www.cnblogs.com/small-bud/p/9064674.html
    ---------
    @param cookies: [{},{}]
    ---------
    @result: cookie dict
"""
cookie_dict = {}
for cookie in cookies:
if cookie.get("name"):
cookie_dict[cookie["name"]] = cookie["value"]
return cookie_dict
def cookiesjar2str(cookies):
str_cookie = ""
for k, v in requests.utils.dict_from_cookiejar(cookies).items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def cookies2str(cookies):
str_cookie = ""
for k, v in cookies.items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def get_urls(
html,
stop_urls=(
"javascript",
"+",
".css",
".js",
".rar",
".xls",
".exe",
".apk",
".doc",
".jpg",
".png",
".flv",
".mp4",
),
):
    # skip urls containing javascript, +, # and the like
regex = r'<a.*?href.*?=.*?["|\'](.*?)["|\']'
urls = get_info(html, regex)
urls = sorted(set(urls), key=urls.index)
if stop_urls:
stop_urls = isinstance(stop_urls, str) and [stop_urls] or stop_urls
use_urls = []
for url in urls:
for stop_url in stop_urls:
if stop_url in url:
break
else:
use_urls.append(url)
urls = use_urls
return urls
def get_full_url(root_url, sub_url):
"""
    @summary: build the complete (absolute) url
    ---------
    @param root_url: base url (the page url)
    @param sub_url: relative url to join onto the base
    ---------
    @result: the complete url
"""
return urljoin(root_url, sub_url)
def joint_url(url, params):
# param_str = "?"
# for key, value in params.items():
# value = isinstance(value, str) and value or str(value)
# param_str += key + "=" + value + "&"
#
# return url + param_str[:-1]
if not params:
return url
params = urlencode(params)
separator = "?" if "?" not in url else "&"
return url + separator + params
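# Hedged usage sketch for joint_url (illustrative URLs and parameters):
def _joint_url_example():
    print(joint_url("http://example.com/list", {"page": 1, "size": 20}))
    # -> http://example.com/list?page=1&size=20
    print(joint_url("http://example.com/list?page=1", {"size": 20}))
    # -> http://example.com/list?page=1&size=20  (reuses "&" because "?" already exists)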
def canonicalize_url(url):
"""
    normalize the url: sorts query parameters and removes the fragment
"""
return _canonicalize_url(url)
def get_url_md5(url):
url = canonicalize_url(url)
url = re.sub("^http://", "https://", url)
return get_md5(url)
def fit_url(urls, identis):
identis = isinstance(identis, str) and [identis] or identis
fit_urls = []
for link in urls:
for identi in identis:
if identi in link:
fit_urls.append(link)
return list(set(fit_urls))
def get_param(url, key):
match = re.search(f"{key}=([^&]+)", url)
if match:
return match.group(1)
return None
def urlencode(params):
"""
    encode a dict of parameters into a query string
@param params:
{
'a': 1,
'b': 2
}
@return: a=1&b=2
"""
return urllib.parse.urlencode(params)
def urldecode(url):
"""
    parse the query string of a url into a dict
@param url: xxx?a=1&b=2
@return:
{
'a': 1,
'b': 2
}
"""
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key, value = param.split("=")
params_json[key] = unquote_url(value)
return params_json
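# Hedged usage sketch for urldecode (illustrative URL; percent-encoded values are decoded):
def _urldecode_example():
    print(urldecode("http://example.com/item?id=10&name=%E6%B5%8B%E8%AF%95"))
    # -> {'id': '10', 'name': '测试'}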
def unquote_url(url, encoding="utf-8"):
"""
    @summary: percent-decode the url
---------
@param url:
---------
@result:
"""
return urllib.parse.unquote(url, encoding=encoding)
def quote_url(url, encoding="utf-8"):
"""
    @summary: percent-encode the url (see http://www.w3school.com.cn/tags/html_ref_urlencode.html)
---------
@param url:
---------
@result:
"""
return urllib.parse.quote(url, safe="%;/?:@&=+$,", encoding=encoding)
def quote_chinese_word(text, encoding="utf-8"):
def quote_chinese_word_func(text):
chinese_word = text.group(0)
return urllib.parse.quote(chinese_word, encoding=encoding)
return re.sub("([\u4e00-\u9fa5]+)", quote_chinese_word_func, text, flags=re.S)
def unescape(str):
"""
    unescape html entities
"""
return html.unescape(str)
def excape(str):
"""
    escape html entities
"""
return html.escape(str)
_regexs = {}
# @log_function_time
def get_info(html, regexs, allow_repeat=True, fetch_one=False, split=None):
regexs = isinstance(regexs, str) and [regexs] or regexs
infos = []
for regex in regexs:
if regex == "":
continue
if regex not in _regexs.keys():
_regexs[regex] = re.compile(regex, re.S)
if fetch_one:
infos = _regexs[regex].search(html)
if infos:
infos = infos.groups()
else:
continue
else:
infos = _regexs[regex].findall(str(html))
if len(infos) > 0:
# print(regex)
break
if fetch_one:
infos = infos if infos else ("",)
return infos if len(infos) > 1 else infos[0]
else:
infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
infos = split.join(infos) if split else infos
return infos
def table_json(table, save_one_blank=True):
"""
    convert a table into a dict; suited to tables whose rows hold key:value pairs
    @param table: a selector object (supports xpath) wrapping the table
    @param save_one_blank: keep a single whitespace character
@return:
"""
data = {}
trs = table.xpath(".//tr")
for tr in trs:
tds = tr.xpath("./td|./th")
for i in range(0, len(tds), 2):
if i + 1 > len(tds) - 1:
break
key = tds[i].xpath("string(.)").extract_first(default="").strip()
value = tds[i + 1].xpath("string(.)").extract_first(default="").strip()
value = replace_str(value, "[\f\n\r\t\v]", "")
value = replace_str(value, " +", " " if save_one_blank else "")
if key:
data[key] = value
return data
def get_table_row_data(table):
"""
    get the data of every row in a table
    @param table: a selector object (supports xpath) wrapping the table
@return: [[],[]..]
"""
datas = []
rows = table.xpath(".//tr")
for row in rows:
cols = row.xpath("./td|./th")
row_datas = []
for col in cols:
data = col.xpath("string(.)").extract_first(default="").strip()
row_datas.append(data)
datas.append(row_datas)
return datas
def rows2json(rows, keys=None):
"""
    convert row data into a list of dicts
    @param rows: data of each row
    @param keys: keys for the dicts; when empty the first row is used as the keys
@return:
"""
data_start_pos = 0 if keys else 1
datas = []
keys = keys or rows[0]
for values in rows[data_start_pos:]:
datas.append(dict(zip(keys, values)))
return datas
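# Hedged usage sketch for rows2json (illustrative rows; the first row becomes the keys):
def _rows2json_example():
    rows = [["name", "price"], ["apple", "3.5"], ["pear", "2.0"]]
    print(rows2json(rows))
    # -> [{'name': 'apple', 'price': '3.5'}, {'name': 'pear', 'price': '2.0'}]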
def get_form_data(form):
"""
    extract the data a form would submit
    :param form: a selector object (supports xpath) wrapping the form
:return:
"""
data = {}
inputs = form.xpath(".//input")
for input in inputs:
name = input.xpath("./@name").extract_first()
value = input.xpath("./@value").extract_first()
if name:
data[name] = value
return data
# does not work well on mac
# def get_domain(url):
# domain = ''
# try:
# domain = get_tld(url)
# except Exception as e:
# log.debug(e)
# return domain
def get_domain(url):
proto, rest = urllib.parse.splittype(url)
domain, rest = urllib.parse.splithost(rest)
return domain
def get_index_url(url):
return "/".join(url.split("/")[:3])
def get_ip(domain):
ip = socket.getaddrinfo(domain, "http")[0][4][0]
return ip
def get_localhost_ip():
"""
    Implemented via UDP: a UDP socket is "connected" so the local IP ends up in the packet header,
    and the local IP is then read back from the socket. No packet is actually sent,
    so packet-capture tools will not see any traffic.
:return:
"""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except:
ip = ""
finally:
if s:
s.close()
return ip
def ip_to_num(ip):
import struct
ip_num = socket.ntohl(struct.unpack("I", socket.inet_aton(str(ip)))[0])
return ip_num
def is_valid_proxy(proxy, check_url=None):
"""
    check whether a proxy is usable
    @param proxy: xxx.xxx.xxx:xxx
    @param check_url: url of a target site used for the check; defaults to None, in which case only a socket
        connection to the proxy server is attempted, which cannot rule out "Connection closed by foreign host"
@return: True / False
"""
is_valid = False
if check_url:
proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
response = None
try:
response = requests.get(
check_url, headers=headers, proxies=proxies, stream=True, timeout=20
)
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}".format(e, proxy))
finally:
if response:
response.close()
else:
ip, port = proxy.split(":")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sk:
sk.settimeout(7)
try:
                sk.connect((ip, int(port)))  # check whether the proxy server is listening
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}:{}".format(e, ip, port))
return is_valid
def is_valid_url(url):
"""
    check whether the url is well formed
:param url:
:return:
"""
if re.match(r"(^https?:/{2}\w.+$)|(ftp://)", url):
return True
else:
return False
def get_text(soup, *args):
try:
return soup.get_text()
except Exception as e:
log.error(e)
return ""
def del_html_tag(content, except_line_break=False, save_img=False, white_replaced=" "):
"""
    strip html tags
    @param content: html content
    @param except_line_break: keep <p> tags
    @param save_img: keep <img> tags
    @param white_replaced: replacement for runs of whitespace
@return:
"""
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
content = replace_str(
content, "(?!&[a-z]+=)&[a-z]+;?"
    )  # drop useless entities such as &nbsp; but keep &xxx= which marks a parameter
if except_line_break:
content = content.replace("</p>", "/p")
content = replace_str(content, "<[^p].*?>")
content = content.replace("/p", "</p>")
content = replace_str(content, "[ \f\r\t\v]")
elif save_img:
content = replace_str(content, "(?!<img.+?>)<.+?>") # 替换掉除图片外的其他标签
content = replace_str(content, "(?! +)\s+", "\n") # 保留空格
content = content.strip()
else:
content = replace_str(content, "<(.|\n)*?>")
content = replace_str(content, "\s+", white_replaced)
content = content.strip()
return content
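# Hedged usage sketch for del_html_tag (illustrative snippet; scripts are removed,
# remaining tags are stripped and whitespace is collapsed):
def _del_html_tag_example():
    snippet = "<div><p>hello</p> <script>var a = 1;</script><p>world</p></div>"
    print(del_html_tag(snippet))
    # -> hello world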
def del_html_js_css(content):
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
return content
def is_have_chinese(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word and True or False
def is_have_english(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words and True or False
def get_chinese_word(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word
def get_english_words(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words or ""
##################################################
def get_json(json_str):
"""
    @summary: parse a json object
    ---------
    @param json_str: a json formatted string
    ---------
    @result: the parsed json object ({} on failure)
"""
try:
return json.loads(json_str) if json_str else {}
except Exception as e1:
try:
json_str = json_str.strip()
json_str = json_str.replace("'", '"')
keys = get_info(json_str, "(\w+):")
for key in keys:
json_str = json_str.replace(key, '"%s"' % key)
return json.loads(json_str) if json_str else {}
except Exception as e2:
log.error(
"""
e1: %s
format json_str: %s
e2: %s
"""
% (e1, json_str, e2)
)
return {}
def jsonp2json(jsonp):
"""
    convert a jsonp response into json
@param jsonp: jQuery172013600082560040794_1553230569815({})
@return:
"""
try:
return json.loads(re.match(".*?({.*}).*", jsonp, re.S).group(1))
except:
raise ValueError("Invalid Input")
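# Hedged usage sketch for jsonp2json (illustrative jsonp payload):
def _jsonp2json_example():
    print(jsonp2json('jQuery1720_1553230569815({"code": 0, "data": [1, 2]})'))
    # -> {'code': 0, 'data': [1, 2]}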
def dumps_json(data, indent=4, sort_keys=False):
"""
    @summary: pretty-format json for printing
    ---------
    @param data: a json formatted string or a json object
    ---------
    @result: the formatted string
"""
try:
if isinstance(data, str):
data = get_json(data)
data = json.dumps(
data,
ensure_ascii=False,
indent=indent,
skipkeys=True,
sort_keys=sort_keys,
default=str,
)
except Exception as e:
data = pformat(data)
return data
def get_json_value(json_object, key):
"""
    @summary: fetch a value from a (possibly nested) json object
    ---------
    @param json_object: a json object or a json formatted string
    @param key: the key; nested keys can be written as key1.key2, e.g. for {'key1':{'key2':3}}
    ---------
    @result: the matching value, or '' when the key does not exist
"""
current_key = ""
value = ""
try:
json_object = (
isinstance(json_object, str) and get_json(json_object) or json_object
)
current_key = key.split(".")[0]
value = json_object[current_key]
key = key[key.find(".") + 1 :]
except Exception as e:
return value
if key == current_key:
return value
else:
return get_json_value(value, key)
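# Hedged usage sketch for get_json_value (illustrative nested object):
def _get_json_value_example():
    data = {"shop": {"name": "demo", "address": {"city": "Beijing"}}}
    print(get_json_value(data, "shop.address.city"))  # -> Beijing
    print(get_json_value(data, "shop.owner"))  # -> '' (missing keys fall back to an empty string)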
def get_all_keys(datas, depth=None, current_depth=0):
"""
    @summary: collect all keys in a json structure
    ---------
    @param datas: dict / list
    @param depth: maximum key depth; unlimited by default, counting starts at 1
    @param current_depth: current depth, used internally, no need to pass it
    ---------
    @result: a list of all keys in the json structure
"""
keys = []
if depth and current_depth >= depth:
return keys
if isinstance(datas, list):
for data in datas:
keys.extend(get_all_keys(data, depth, current_depth=current_depth + 1))
elif isinstance(datas, dict):
for key, value in datas.items():
keys.append(key)
if isinstance(value, dict):
keys.extend(get_all_keys(value, depth, current_depth=current_depth + 1))
return keys
def to_chinese(unicode_str):
format_str = json.loads('{"chinese":"%s"}' % unicode_str)
return format_str["chinese"]
##################################################
def replace_str(source_str, regex, replace_str=""):
"""
    @summary: regex based string replacement
    ---------
    @param source_str: the original string
    @param regex: the regular expression
    @param replace_str: the replacement text, '' by default
    ---------
    @result: the string after replacement
"""
str_info = re.compile(regex)
return str_info.sub(replace_str, source_str)
def del_redundant_blank_character(text):
"""
    collapse redundant whitespace, keeping a single space
:param text:
:return:
"""
return re.sub("\s+", " ", text)
##################################################
def get_conf_value(config_file, section, key):
cp = configparser.ConfigParser(allow_no_value=True)
with codecs.open(config_file, "r", encoding="utf-8") as f:
cp.read_file(f)
return cp.get(section, key)
def mkdir(path):
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError as exc: # Python >2.5
pass
def write_file(filename, content, mode="w", encoding="utf-8"):
"""
    @summary: write a file
    ---------
    @param filename: file name (with path)
    @param content: content to write
    @param mode: mode w/w+ (overwrite / append)
---------
@result:
"""
directory = os.path.dirname(filename)
mkdir(directory)
with open(filename, mode, encoding=encoding) as file:
file.writelines(content)
def read_file(filename, readlines=False, encoding="utf-8"):
"""
    @summary: read a file
    ---------
    @param filename: file name (with path)
    @param readlines: read line by line (False by default)
    ---------
    @result: a list of lines when readlines is True, otherwise the whole file as one string
"""
content = None
try:
with open(filename, "r", encoding=encoding) as file:
content = file.readlines() if readlines else file.read()
except Exception as e:
log.error(e)
return content
def get_oss_file_list(oss_handler, prefix, date_range_min, date_range_max=None):
"""
    list files stored on oss
    @param prefix: path prefix, e.g. data/car_service_line/yiche/yiche_serial_zongshu_info
    @param date_range_min: lower bound of the date range, "/"-separated, e.g. 2019/03/01 or 2019/03/01/00/00/00
    @param date_range_max: upper bound of the date range, "/"-separated, e.g. 2019/03/01 or 2019/03/01/00/00/00
    @return: yields each file path, e.g. html/e_commerce_service_line/alibaba/alibaba_shop_info/2019/03/22/15/53/15/8ca8b9e4-4c77-11e9-9dee-acde48001122.json.snappy
"""
    # work out the date range
date_range_max = date_range_max or date_range_min
date_format = "/".join(
["%Y", "%m", "%d", "%H", "%M", "%S"][: date_range_min.count("/") + 1]
)
time_interval = [
{"days": 365},
{"days": 31},
{"days": 1},
{"hours": 1},
{"minutes": 1},
{"seconds": 1},
][date_range_min.count("/")]
date_range = get_between_date(
date_range_min, date_range_max, date_format=date_format, **time_interval
)
for date in date_range:
file_folder_path = os.path.join(prefix, date)
objs = oss_handler.list(prefix=file_folder_path)
for obj in objs:
filename = obj.key
yield filename
def is_html(url):
if not url:
return False
try:
content_type = request.urlopen(url).info().get("Content-Type", "")
if "text/html" in content_type:
return True
else:
return False
except Exception as e:
log.error(e)
return False
def is_exist(file_path):
"""
    @summary: whether the file exists
---------
@param file_path:
---------
@result:
"""
return os.path.exists(file_path)
def download_file(url, file_path, *, call_func=None, proxies=None, data=None):
"""
    Download a file; the directory holding it is created automatically
    Args:
        url: download url
        file_path: path the file is written to
        call_func: callback invoked after a successful download
        proxies: proxies
        data: request body
Returns:
"""
directory = os.path.dirname(file_path)
mkdir(directory)
    # progress bar
def progress_callfunc(blocknum, blocksize, totalsize):
"""回调函数
@blocknum : 已经下载的数据块
@blocksize : 数据块的大小
@totalsize: 远程文件的大小
"""
percent = 100.0 * blocknum * blocksize / totalsize
if percent > 100:
percent = 100
        # print ('progress %.2f%%' % percent, end = '\r')
        sys.stdout.write("progress %.2f%%" % percent + "\r")
sys.stdout.flush()
if url:
try:
if proxies:
# create the object, assign it to a variable
proxy = request.ProxyHandler(proxies)
# construct a new opener using your proxy settings
opener = request.build_opener(proxy)
# install the openen on the module-level
request.install_opener(opener)
request.urlretrieve(url, file_path, progress_callfunc, data)
if callable(call_func):
call_func()
return 1
except Exception as e:
log.error(e)
return 0
else:
return 0
def get_file_list(path, ignore=[]):
templist = path.split("*")
path = templist[0]
file_type = templist[1] if len(templist) >= 2 else ""
    # walk the directory tree recursively
def get_file_list_(path, file_type, ignore, all_file=[]):
file_list = os.listdir(path)
for file_name in file_list:
if file_name in ignore:
continue
file_path = os.path.join(path, file_name)
if os.path.isdir(file_path):
get_file_list_(file_path, file_type, ignore, all_file)
else:
if not file_type or file_name.endswith(file_type):
all_file.append(file_path)
return all_file
return get_file_list_(path, file_type, ignore) if os.path.isdir(path) else [path]
def rename_file(old_name, new_name):
os.rename(old_name, new_name)
def del_file(path, ignore=()):
files = get_file_list(path, ignore)
for file in files:
try:
os.remove(file)
except Exception as e:
log.error(
"""
删除出错: %s
Exception : %s
"""
% (file, str(e))
)
finally:
pass
def get_file_type(file_name):
"""
    @summary: get the file extension (suffix)
---------
@param file_name:
---------
@result:
"""
try:
return os.path.splitext(file_name)[1]
except Exception as e:
log.exception(e)
def get_file_path(file_path):
"""
    @summary: get the directory part of a file path
---------
@param file_path: /root/a.py
---------
@result: /root
"""
try:
return os.path.split(file_path)[0]
except Exception as e:
log.exception(e)
#############################################
def exec_js(js_code):
"""
    @summary: execute js code
    ---------
    @param js_code: js code
    ---------
    @result: the evaluation result
"""
return execjs.eval(js_code)
def compile_js(js_func):
"""
    @summary: compile a js function
    ---------
    @param js_func: js function source
    ---------
    @result: a callable; invoke it as fun('js_funName', param1, param2)
"""
ctx = execjs.compile(js_func)
return ctx.call
###############################################
#############################################
def date_to_timestamp(date, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
    @param date: date string such as "2011-09-28 10:00:00" to convert into a timestamp
    @param time_format: date format
    ---------
    @result: timestamp as an int
"""
timestamp = time.mktime(time.strptime(date, time_format))
return int(timestamp)
def timestamp_to_date(timestamp, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
    @param timestamp: timestamp to convert into a date string
    @param time_format: date format
    ---------
    @result: date string
"""
if timestamp is None:
raise ValueError("timestamp is null")
date = time.localtime(timestamp)
return time.strftime(time_format, date)
def get_current_timestamp():
return int(time.time())
def get_current_date(date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.now().strftime(date_format)
# return time.strftime(date_format, time.localtime(time.time()))
def get_date_number(year=None, month=None, day=None):
"""
    @summary: get the ISO calendar numbers for the given date,
                defaults to the current date
    ---------
    @param year: 2010
    @param month: 6
    @param day: 16
    ---------
    @result: (year, week number, weekday), e.g. (2010, 24, 3)
"""
if year and month and day:
return datetime.date(year, month, day).isocalendar()
elif not any([year, month, day]):
return datetime.datetime.now().isocalendar()
else:
assert year, "year 不能为空"
assert month, "month 不能为空"
assert day, "day 不能为空"
def get_between_date(
begin_date, end_date=None, date_format="%Y-%m-%d", **time_interval
):
"""
    @summary: get the dates inside a time interval, one per day by default
    ---------
    @param begin_date: start date, str, e.g. 2018-10-01
    @param end_date: defaults to today
    @param date_format: date format, must match the format of begin_date
    @param time_interval: step, one day by default; supports days, seconds, microseconds, milliseconds, minutes, hours, weeks
    ---------
    @result: list of date strings
"""
date_list = []
begin_date = datetime.datetime.strptime(begin_date, date_format)
end_date = (
datetime.datetime.strptime(end_date, date_format)
if end_date
else datetime.datetime.strptime(
time.strftime(date_format, time.localtime(time.time())), date_format
)
)
time_interval = time_interval or dict(days=1)
while begin_date <= end_date:
date_str = begin_date.strftime(date_format)
date_list.append(date_str)
begin_date += datetime.timedelta(**time_interval)
if end_date.strftime(date_format) not in date_list:
date_list.append(end_date.strftime(date_format))
return date_list
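# Illustrative example: get_between_date("2021-01-01", "2021-01-03")
# returns ['2021-01-01', '2021-01-02', '2021-01-03'].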
def get_between_months(begin_date, end_date=None):
"""
    @summary: get the months inside a time interval
               (whole months only)
    ---------
    @param begin_date: start date, e.g. 2018-01-01
    @param end_date: defaults to the current date
    ---------
    @result: list, e.g. ['2018-01', '2018-02']
"""
def add_months(dt, months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year=year, month=month, day=day)
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = (
datetime.datetime.strptime(end_date, "%Y-%m-%d")
if end_date
else datetime.datetime.strptime(
time.strftime("%Y-%m-%d", time.localtime(time.time())), "%Y-%m-%d"
)
)
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m")
date_list.append(date_str)
begin_date = add_months(begin_date, 1)
return date_list
def get_today_of_day(day_offset=0):
return str(datetime.date.today() + datetime.timedelta(days=day_offset))
def get_days_of_month(year, month):
"""
    return the number of days in the given month
"""
return calendar.monthrange(year, month)[1]
def get_firstday_of_month(date):
"""''
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = "01"
if int(month) < 10:
month = "0" + str(int(month))
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_lastday_of_month(date):
"""''
get the last day of month
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = calendar.monthrange(year, month)[1]
month = add_zero(month)
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_firstday_month(month_offset=0):
"""''
get the first day of month from today
month_offset is how many months
"""
(y, m, d) = get_year_month_and_days(month_offset)
d = "01"
arr = (y, m, d)
return "-".join("%s" % i for i in arr)
def get_lastday_month(month_offset=0):
"""''
get the last day of month from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset))
def get_last_month(month_offset=0):
"""''
    get the year-month (YYYY-MM) string offset by month_offset months from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset)[:2])
def get_year_month_and_days(month_offset=0):
"""
@summary:
---------
    @param month_offset: month offset relative to the current month
---------
@result: ('2019', '04', '30')
"""
today = datetime.datetime.now()
year, month = today.year, today.month
this_year = int(year)
this_month = int(month)
total_month = this_month + month_offset
if month_offset >= 0:
if total_month <= 12:
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
else:
if (total_month > 0) and (total_month < 12):
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
def add_zero(n):
return "%02d" % n
def get_month(month_offset=0):
"""''
    get the date N months away from today
    if month_offset > 0, the date N months after today
    if month_offset < 0, the date N months before today
date format = "YYYY-MM-DD"
"""
today = datetime.datetime.now()
day = add_zero(today.day)
(y, m, d) = get_year_month_and_days(month_offset)
arr = (y, m, d)
if int(day) < int(d):
arr = (y, m, day)
return "-".join("%s" % i for i in arr)
@run_safe_model("format_date")
def format_date(date, old_format="", new_format="%Y-%m-%d %H:%M:%S"):
"""
    @summary: normalize a date string into another format
    ---------
    @param date: date string, e.g. 2017年4月17日 3时27分12秒
    @param old_format: original date format, e.g. '%Y年%m月%d日 %H时%M分%S秒'; guessed from the digits when omitted
            %y two-digit year (00-99)
            %Y four-digit year (0000-9999)
            %m month (01-12)
            %d day of month (01-31)
            %H hour, 24-hour clock (00-23)
            %I hour, 12-hour clock (01-12)
            %M minute (00-59)
            %S second (00-59)
    @param new_format: output date format
    ---------
    @result: the formatted date as a string, e.g. 2017-04-17 03:27:12
"""
if not date:
return ""
if not old_format:
regex = "(\d+)"
numbers = get_info(date, regex, allow_repeat=True)
formats = ["%Y", "%m", "%d", "%H", "%M", "%S"]
old_format = date
for i, number in enumerate(numbers[:6]):
            if i == 0 and len(number) == 2:  # the year may be two digits, use %y
                old_format = old_format.replace(
                    number, formats[i].lower(), 1
                )  # replace only once, e.g. '2017年11月30日 11:49': keeps month 11 from also matching hour 11
            else:
                old_format = old_format.replace(number, formats[i], 1)  # replace only once
try:
date_obj = datetime.datetime.strptime(date, old_format)
if "T" in date and "Z" in date:
date_obj += datetime.timedelta(hours=8)
date_str = date_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
date_str = datetime.datetime.strftime(date_obj, new_format)
except Exception as e:
log.error("日期格式化出错,old_format = %s 不符合 %s 格式" % (old_format, date))
date_str = date
return date_str
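# Illustrative example: format_date("2017/04/17", old_format="%Y/%m/%d")
# returns "2017-04-17 00:00:00"; when old_format is omitted the format is
# guessed from the digits in the string.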
def transform_lower_num(data_str: str):
num_map = {
"一": "1",
"二": "2",
"两": "2",
"三": "3",
"四": "4",
"五": "5",
"六": "6",
"七": "7",
"八": "8",
"九": "9",
"十": "0",
}
pattern = f'[{"|".join(num_map.keys())}|零]'
res = re.search(pattern, data_str)
if not res:
        # no Chinese numerals in the string, return it unchanged
return data_str
data_str = data_str.replace("0", "零")
for n in num_map:
data_str = data_str.replace(n, num_map[n])
re_data_str = re.findall("\d+", data_str)
for i in re_data_str:
if len(i) == 3:
new_i = i.replace("0", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 4:
new_i = i.replace("10", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 2 and int(i) < 10:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
elif len(i) == 1 and int(i) == 0:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
return data_str.replace("零", "0")
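# Illustrative examples: transform_lower_num("三天前") -> "3天前",
# transform_lower_num("十二小时前") -> "12小时前".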
@run_safe_model("format_time")
def format_time(release_time, date_format="%Y-%m-%d %H:%M:%S"):
"""
>>> format_time("2个月前")
'2021-08-15 16:24:21'
>>> format_time("2月前")
'2021-08-15 16:24:36'
"""
release_time = transform_lower_num(release_time)
release_time = release_time.replace("日", "天").replace("/", "-")
if "年前" in release_time:
years = re.compile("(\d+)\s*年前").findall(release_time)
years_ago = datetime.datetime.now() - datetime.timedelta(
days=int(years[0]) * 365
)
release_time = years_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "月前" in release_time:
months = re.compile("(\d+)[\s个]*月前").findall(release_time)
months_ago = datetime.datetime.now() - datetime.timedelta(
days=int(months[0]) * 30
)
release_time = months_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "周前" in release_time:
weeks = re.compile("(\d+)\s*周前").findall(release_time)
weeks_ago = datetime.datetime.now() - datetime.timedelta(days=int(weeks[0]) * 7)
release_time = weeks_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "天前" in release_time:
ndays = re.compile("(\d+)\s*天前").findall(release_time)
days_ago = datetime.datetime.now() - datetime.timedelta(days=int(ndays[0]))
release_time = days_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "小时前" in release_time:
nhours = re.compile("(\d+)\s*小时前").findall(release_time)
hours_ago = datetime.datetime.now() - datetime.timedelta(hours=int(nhours[0]))
release_time = hours_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "分钟前" in release_time:
nminutes = re.compile("(\d+)\s*分钟前").findall(release_time)
minutes_ago = datetime.datetime.now() - datetime.timedelta(
minutes=int(nminutes[0])
)
release_time = minutes_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "前天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=2)
release_time = release_time.replace("前天", str(yesterday))
elif "昨天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
release_time = release_time.replace("昨天", str(yesterday))
elif "今天" in release_time:
release_time = release_time.replace("今天", get_current_date("%Y-%m-%d"))
elif "刚刚" in release_time:
release_time = get_current_date()
elif re.search("^\d\d:\d\d", release_time):
release_time = get_current_date("%Y-%m-%d") + " " + release_time
elif not re.compile("\d{4}").findall(release_time):
month = re.compile("\d{1,2}").findall(release_time)
if month and int(month[0]) <= int(get_current_date("%m")):
release_time = get_current_date("%Y") + "-" + release_time
else:
release_time = str(int(get_current_date("%Y")) - 1) + "-" + release_time
    # split the day and the hour apart when they are glued together
template = re.compile("(\d{4}-\d{1,2}-\d{2})(\d{1,2})")
release_time = re.sub(template, r"\1 \2", release_time)
release_time = format_date(release_time, new_format=date_format)
return release_time
def to_date(date_str, date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(date_str, date_format)
def get_before_date(
current_date,
days,
current_date_format="%Y-%m-%d %H:%M:%S",
return_date_format="%Y-%m-%d %H:%M:%S",
):
"""
    @summary: get a date shifted by a number of days
    ---------
    @param current_date: current date, str
    @param days: offset in days, -1 means one day earlier, 1 means one day later
    @param return_date_format: format of the returned date
    ---------
    @result: date string
"""
current_date = to_date(current_date, current_date_format)
date_obj = current_date + datetime.timedelta(days=days)
return datetime.datetime.strftime(date_obj, return_date_format)
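# Illustrative example: get_before_date("2021-01-10 00:00:00", -1)
# returns "2021-01-09 00:00:00".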
def delay_time(sleep_time=60):
"""
    @summary: sleep, one minute by default
    ---------
    @param sleep_time: in seconds
---------
@result:
"""
time.sleep(sleep_time)
def format_seconds(seconds):
"""
    @summary: convert seconds into a days/hours/minutes/seconds string
    ---------
    @param seconds:
    ---------
    @result: e.g. 2天3小时2分49秒
    """
    seconds = int(seconds + 0.5)  # round to the nearest second
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = ""
if d:
times += "{}天".format(d)
if h:
times += "{}小时".format(h)
if m:
times += "{}分".format(m)
if s:
times += "{}秒".format(s)
return times
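# Illustrative example: format_seconds(93784) returns "1天2小时3分4秒"
# (1 day, 2 hours, 3 minutes, 4 seconds).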
################################################
def get_md5(*args):
"""
    @summary: get a unique 32-char md5 hex digest
    ---------
    @param *args: values combined into a single digest (useful as a dedup key)
---------
@result: 7c8684bcbdfcea6697650aa53d7b1405
"""
m = hashlib.md5()
for arg in args:
m.update(str(arg).encode())
return m.hexdigest()
def get_sha1(*args):
"""
    @summary: get a unique 40-char sha1 hex digest, usable as a unique id
    ---------
    @param *args: values combined into a single digest (useful as a dedup key)
---------
@result: ba4868b3f277c8e387b55d9e3d0be7c045cdd89e
"""
sha1 = hashlib.sha1()
for arg in args:
sha1.update(str(arg).encode())
    return sha1.hexdigest()  # 40 hex chars
def get_base64(data):
if data is None:
return data
return base64.b64encode(str(data).encode()).decode("utf8")
def get_uuid(key1="", key2=""):
"""
    @summary: compute a uuid value
        can combine two strings into a single unique value, e.g. a domain and a news title as a compound index
---------
@param key1:str
@param key2:str
---------
@result:
"""
uuid_object = ""
if not key1 and not key2:
uuid_object = uuid.uuid1()
else:
hash = md5(bytes(key1, "utf-8") + bytes(key2, "utf-8")).digest()
uuid_object = uuid.UUID(bytes=hash[:16], version=3)
return str(uuid_object)
def get_hash(text):
return hash(text)
##################################################
def cut_string(text, length):
"""
    @summary: split text into chunks of the given length
    ---------
    @param text: text
    @param length: chunk length
    ---------
    @result: list of chunks of the given length
"""
text_list = re.findall(".{%d}" % length, text, re.S)
leave_text = text[len(text_list) * length :]
if leave_text:
text_list.append(leave_text)
return text_list
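# Illustrative example: cut_string("abcdefg", 3) returns ['abc', 'def', 'g'].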
def get_random_string(length=1):
random_string = "".join(random.sample(string.ascii_letters + string.digits, length))
return random_string
def get_random_password(length=8, special_characters=""):
"""
    @summary: create a random password, 8 chars long by default, containing upper-case letters, lower-case letters and digits
    ---------
    @param length: password length, default 8
    @param special_characters: special characters to include
    ---------
    @result: a password of the given length
"""
while True:
random_password = "".join(
random.sample(
string.ascii_letters + string.digits + special_characters, length
)
)
if (
re.search("[0-9]", random_password)
and re.search("[A-Z]", random_password)
and re.search("[a-z]", random_password)
):
if not special_characters:
break
elif set(random_password).intersection(special_characters):
break
return random_password
def get_random_email(length=None, email_types: list = None, special_characters=""):
"""
    generate a random email address
    :param length: length of the mailbox name
    :param email_types: email domains to choose from
    :param special_characters: special characters to include
:return:
"""
if not length:
length = random.randint(4, 12)
if not email_types:
email_types = [
"qq.com",
"163.com",
"gmail.com",
"yahoo.com",
"hotmail.com",
"yeah.net",
"126.com",
"139.com",
"sohu.com",
]
email_body = get_random_password(length, special_characters)
email_type = random.choice(email_types)
email = email_body + "@" + email_type
return email
#################################
def dumps_obj(obj):
return pickle.dumps(obj)
def loads_obj(obj_str):
return pickle.loads(obj_str)
def get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
log.error("Method %r not found in: %s" % (name, obj))
return None
def witch_workspace(project_path):
"""
@summary:
---------
@param project_path:
---------
@result:
"""
    os.chdir(project_path)  # switch the working directory
############### database helpers #######################
def format_sql_value(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, (list, dict)):
value = dumps_json(value, indent=None)
elif isinstance(value, (datetime.date, datetime.time)):
value = str(value)
elif isinstance(value, bool):
value = int(value)
return value
def list2str(datas):
"""
    convert a list into a tuple-style string
:param datas: [1, 2]
:return: (1, 2)
"""
data_str = str(tuple(datas))
data_str = re.sub(",\)$", ")", data_str)
return data_str
def make_insert_sql(
table, data, auto_update=False, update_columns=(), insert_ignore=False
):
"""
    @summary: for mysql; oracle dates would still need to_date handling (TODO)
    ---------
    @param table:
    @param data: row data as a dict
    @param auto_update: use replace into, fully overwriting an existing row
    @param update_columns: columns to update, all by default; when set, auto_update is ignored and these columns are updated on duplicate key
    @param insert_ignore: skip rows that already exist
---------
@result:
"""
keys = ["`{}`".format(key) for key in data.keys()]
keys = list2str(keys).replace("'", "")
values = [format_sql_value(value) for value in data.values()]
values = list2str(values)
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
update_columns_ = ", ".join(
["{key}=values({key})".format(key=key) for key in update_columns]
)
sql = (
"insert%s into `{table}` {keys} values {values} on duplicate key update %s"
% (" ignore" if insert_ignore else "", update_columns_)
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values}"
else:
sql = "insert%s into `{table}` {keys} values {values}" % (
" ignore" if insert_ignore else ""
)
sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
return sql
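# Illustrative example (made-up table and columns):
#   make_insert_sql("user", {"id": 1, "name": "feapder"})
# returns "insert into `user` (`id`, `name`) values (1, 'feapder')".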
def make_update_sql(table, data, condition):
"""
    @summary: for mysql; oracle dates would still need to_date handling (TODO)
    ---------
    @param table:
    @param data: row data as a dict
    @param condition: where condition
---------
@result:
"""
key_values = []
for key, value in data.items():
value = format_sql_value(value)
if isinstance(value, str):
key_values.append("`{}`={}".format(key, repr(value)))
elif value is None:
key_values.append("`{}`={}".format(key, "null"))
else:
key_values.append("`{}`={}".format(key, value))
key_values = ", ".join(key_values)
sql = "update `{table}` set {key_values} where {condition}"
sql = sql.format(table=table, key_values=key_values, condition=condition)
return sql
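# Illustrative example (made-up table and columns):
#   make_update_sql("user", {"name": "feapder"}, "id = 1")
# returns "update `user` set `name`='feapder' where id = 1".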
def make_batch_sql(
table, datas, auto_update=False, update_columns=(), update_columns_value=()
):
"""
    @summary: build a batch insert sql
    ---------
    @param table:
    @param datas: row data, a list of dicts [{...}]
    @param auto_update: use replace into, fully overwriting existing rows
    @param update_columns: columns to update, all by default; when set, auto_update is ignored and these columns are updated on duplicate key
    @param update_columns_value: values for the updated columns, defaults to the corresponding values in datas; note that string values need explicit single quotes, e.g. update_columns_value=("'test'",)
---------
@result:
"""
if not datas:
return
keys = list(datas[0].keys())
values_placeholder = ["%s"] * len(keys)
values = []
for data in datas:
value = []
for key in keys:
current_data = data.get(key)
current_data = format_sql_value(current_data)
value.append(current_data)
values.append(value)
keys = ["`{}`".format(key) for key in keys]
keys = list2str(keys).replace("'", "")
values_placeholder = list2str(values_placeholder).replace("'", "")
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
if update_columns_value:
update_columns_ = ", ".join(
[
"`{key}`={value}".format(key=key, value=value)
for key, value in zip(update_columns, update_columns_value)
]
)
else:
update_columns_ = ", ".join(
["`{key}`=values(`{key}`)".format(key=key) for key in update_columns]
)
sql = "insert into `{table}` {keys} values {values_placeholder} on duplicate key update {update_columns}".format(
table=table,
keys=keys,
values_placeholder=values_placeholder,
update_columns=update_columns_,
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
else:
sql = "insert ignore into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
return sql, values
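# Illustrative example (made-up table and columns):
#   make_batch_sql("user", [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}])
# returns ("insert ignore into `user` (`id`, `name`) values (%s, %s)",
#          [[1, 'a'], [2, 'b']]) for use with executemany-style drivers.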
############### json helpers #######################
def key2underline(key: str, strict=True):
"""
>>> key2underline("HelloWord")
'hello_word'
>>> key2underline("SHData", strict=True)
's_h_data'
>>> key2underline("SHData", strict=False)
'sh_data'
>>> key2underline("SHDataHi", strict=False)
'sh_data_hi'
>>> key2underline("SHDataHi", strict=True)
's_h_data_hi'
>>> key2underline("dataHi", strict=True)
'data_hi'
"""
regex = "[A-Z]*" if not strict else "[A-Z]"
capitals = re.findall(regex, key)
if capitals:
for capital in capitals:
if not capital:
continue
if key.startswith(capital):
if len(capital) > 1:
key = key.replace(
capital, capital[:-1].lower() + "_" + capital[-1].lower(), 1
)
else:
key = key.replace(capital, capital.lower(), 1)
else:
if len(capital) > 1:
key = key.replace(capital, "_" + capital.lower() + "_", 1)
else:
key = key.replace(capital, "_" + capital.lower(), 1)
return key.strip("_")
def key2hump(key):
"""
    convert a snake_case key to CamelCase
"""
return key.title().replace("_", "")
def format_json_key(json_data):
json_data_correct = {}
for key, value in json_data.items():
key = key2underline(key)
json_data_correct[key] = value
return json_data_correct
def quick_to_json(text):
"""
    @summary: quickly convert request headers copied from the browser into a dict
---------
@param text:
---------
@result:
"""
contents = text.split("\n")
json = {}
for content in contents:
if content == "\n":
continue
content = content.strip()
regex = ["(:?.*?):(.*)", "(.*?):? +(.*)", "([^:]*)"]
result = get_info(content, regex)
result = result[0] if isinstance(result[0], tuple) else result
try:
json[result[0]] = eval(result[1].strip())
except:
json[result[0]] = result[1].strip()
return json
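# Illustrative example: quick_to_json("Host: example.com\nAccept: */*")
# returns {'Host': 'example.com', 'Accept': '*/*'}.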
##############################
def print_pretty(object):
pprint(object)
def print_params2json(url):
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
params_json[key_value[0]] = key_value[1]
print(dumps_json(params_json))
def print_cookie2json(cookie_str_or_list):
if isinstance(cookie_str_or_list, str):
cookie_json = {}
cookies = cookie_str_or_list.split("; ")
for cookie in cookies:
name, value = cookie.split("=")
cookie_json[name] = value
else:
cookie_json = get_cookies_from_selenium_cookie(cookie_str_or_list)
print(dumps_json(cookie_json))
###############################
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x):
"""iflatten(sequence) -> iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if _is_listlike(el):
for el_ in flatten(el):
yield el_
else:
yield el
def _is_listlike(x):
"""
>>> _is_listlike("foo")
False
>>> _is_listlike(5)
False
>>> _is_listlike(b"foo")
False
>>> _is_listlike([b"foo"])
True
>>> _is_listlike((b"foo",))
True
>>> _is_listlike({})
True
>>> _is_listlike(set())
True
>>> _is_listlike((x for x in range(3)))
True
>>> _is_listlike(six.moves.xrange(5))
True
"""
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
###################
def re_def_supper_class(obj, supper_class):
"""
    redefine the parent class
    @param obj: a class, e.g. for class A pass A itself, or an instance's a.__class__
    @param supper_class: the new parent class
@return:
"""
obj.__bases__ = (supper_class,)
###################
freq_limit_record = {}
def reach_freq_limit(rate_limit, *key):
"""
    rate limiting
    :param rate_limit: limit window in seconds
    :param key: key(s) the rate limit applies to
:return: True / False
"""
if rate_limit == 0:
return False
msg_md5 = get_md5(*key)
key = "rate_limit:{}".format(msg_md5)
try:
if get_redisdb().strget(key):
return True
get_redisdb().strset(key, time.time(), ex=rate_limit)
except redis.exceptions.ConnectionError as e:
        # redis unavailable, fall back to in-memory rate limiting
global freq_limit_record
if key not in freq_limit_record:
freq_limit_record[key] = time.time()
return False
if time.time() - freq_limit_record.get(key) < rate_limit:
return True
else:
freq_limit_record[key] = time.time()
return False
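# Illustrative usage (made-up keys): reach_freq_limit(3600, "dingding", "spider_down")
# returns False on the first call and True for the same keys within the next hour,
# so callers can drop duplicate alerts.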
def dingding_warning(
message, message_prefix=None, rate_limit=None, url=None, user_phone=None
):
    # resolve defaults at call time so the latest settings are used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.DINGDING_WARNING_URL
user_phone = user_phone or setting.DINGDING_WARNING_PHONE
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
data = {
"msgtype": "text",
"text": {"content": message},
"at": {"atMobiles": user_phone, "isAtAll": setting.DINGDING_WARNING_ALL},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def email_warning(
message,
title,
message_prefix=None,
email_sender=None,
email_password=None,
email_receiver=None,
email_smtpserver=None,
rate_limit=None,
):
    # resolve defaults at call time so the latest settings are used
email_sender = email_sender or setting.EMAIL_SENDER
email_password = email_password or setting.EMAIL_PASSWORD
email_receiver = email_receiver or setting.EMAIL_RECEIVER
email_smtpserver = email_smtpserver or setting.EMAIL_SMTPSERVER
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
if not all([message, email_sender, email_password, email_receiver]):
return
if reach_freq_limit(
rate_limit, email_receiver, email_sender, message_prefix or message
):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(email_receiver, str):
email_receiver = [email_receiver]
with EmailSender(
username=email_sender, password=email_password, smtpserver=email_smtpserver
) as email:
return email.send(receivers=email_receiver, title=title, content=message)
def linkedsee_warning(message, rate_limit=3600, message_prefix=None, token=None):
"""
    Linkedsee phone-call alert
Args:
message:
rate_limit:
message_prefix:
token:
Returns:
"""
if not token:
log.info("未设置灵犀token,不支持报警")
return
if reach_freq_limit(rate_limit, token, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
headers = {"servicetoken": token, "Content-Type": "application/json"}
url = "http://www.linkedsee.com/alarm/zabbix"
data = {"content": message}
response = requests.post(url, data=json.dumps(data), headers=headers)
return response
def wechat_warning(
message,
message_prefix=None,
rate_limit=None,
url=None,
user_phone=None,
all_users: bool = None,
):
"""企业微信报警"""
    # resolve defaults at call time so the latest settings are used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.WECHAT_WARNING_URL
user_phone = user_phone or setting.WECHAT_WARNING_PHONE
all_users = all_users if all_users is not None else setting.WECHAT_WARNING_ALL
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
if all_users is True or not user_phone:
user_phone = ["@all"]
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
data = {
"msgtype": "text",
"text": {"content": message, "mentioned_mobile_list": user_phone},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def feishu_warning(message, message_prefix=None, rate_limit=None, url=None, user=None):
"""
Args:
message:
message_prefix:
rate_limit:
url:
user: {"open_id":"ou_xxxxx", "name":"xxxx"} 或 [{"open_id":"ou_xxxxx", "name":"xxxx"}]
Returns:
"""
    # resolve defaults at call time so the latest settings are used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.FEISHU_WARNING_URL
user = user or setting.FEISHU_WARNING_USER
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(user, dict):
user = [user] if user else []
at = ""
if setting.FEISHU_WARNING_ALL:
at = '<at user_id="all">所有人</at>'
elif user:
at = " ".join(
[f'<at user_id="{u.get("open_id")}">{u.get("name")}</at>' for u in user]
)
data = {"msg_type": "text", "content": {"text": at + message}}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("StatusCode") == 0:
return True
else:
raise Exception(result.get("msg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def send_msg(msg, level="DEBUG", message_prefix=""):
if setting.WARNING_LEVEL == "ERROR":
if level.upper() != "ERROR":
return
if setting.DINGDING_WARNING_URL:
keyword = "feapder报警系统\n"
dingding_warning(keyword + msg, message_prefix=message_prefix)
if setting.EMAIL_RECEIVER:
title = message_prefix or msg
if len(title) > 50:
title = title[:50] + "..."
email_warning(msg, message_prefix=message_prefix, title=title)
if setting.WECHAT_WARNING_URL:
keyword = "feapder报警系统\n"
wechat_warning(keyword + msg, message_prefix=message_prefix)
if setting.FEISHU_WARNING_URL:
keyword = "feapder报警系统\n"
feishu_warning(keyword + msg, message_prefix=message_prefix)
###################
def make_item(cls, data: dict):
"""提供Item类与原数据,快速构建Item实例
:param cls: Item类
:param data: 字典格式的数据
"""
item = cls()
for key, val in data.items():
setattr(item, key, val)
return item
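# Illustrative usage (NewsItem is a made-up Item subclass):
#   item = make_item(NewsItem, {"title": "hello", "url": "https://example.com"})
# sets item.title and item.url via setattr.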
###################
def aio_wrap(loop=None, executor=None):
"""
wrap a normal sync version of a function to an async version
"""
outer_loop = loop
outer_executor = executor
def wrap(fn):
@wraps(fn)
async def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
if outer_loop is None:
loop = asyncio.get_event_loop()
else:
loop = outer_loop
if executor is None:
executor = outer_executor
pfunc = partial(fn, *args, **kwargs)
return await loop.run_in_executor(executor, pfunc)
return run
return wrap
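# Illustrative usage: wrap a blocking function so it can be awaited in the default
# executor ("fetch" and its body are made up):
#   @aio_wrap()
#   def fetch(url):
#       return requests.get(url).text
#   html = await fetch("https://example.com")  # inside a coroutine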
######### number ##########
def ensure_int(n):
"""
>>> ensure_int(None)
0
>>> ensure_int(False)
0
>>> ensure_int(12)
12
>>> ensure_int("72")
72
>>> ensure_int('')
0
>>> ensure_int('1')
1
"""
if not n:
return 0
return int(n)
def ensure_float(n):
"""
>>> ensure_float(None)
0.0
>>> ensure_float(False)
0.0
>>> ensure_float(12)
12.0
>>> ensure_float("72")
72.0
"""
if not n:
return 0.0
return float(n)
| 24.795243 | 145 | 0.562601 | import asyncio
import base64
import calendar
import codecs
import configparser import datetime
import functools
import hashlib
import html
import json
import os
import pickle
import random
import re
import signal
import socket
import ssl
import string
import sys
import time
import traceback
import urllib
import urllib.parse
import uuid
import weakref
from functools import partial, wraps
from hashlib import md5
from pprint import pformat
from pprint import pprint
from urllib import request
from urllib.parse import urljoin
import execjs import redis
import requests
import six
from requests.cookies import RequestsCookieJar
from w3lib.url import canonicalize_url as _canonicalize_url
import feapder.setting as setting
from feapder.db.redisdb import RedisDB
from feapder.utils.email_sender import EmailSender
from feapder.utils.log import log
os.environ["EXECJS_RUNTIME"] = "Node"
ssl._create_default_https_context = ssl._create_unverified_context
TIME_OUT = 30
TIMER_TIME = 5
redisdb = None
def get_redisdb():
global redisdb
if not redisdb:
redisdb = RedisDB()
return redisdb
class Singleton(object):
def __init__(self, cls):
self._cls = cls
self._instance = {}
def __call__(self, *args, **kwargs):
if self._cls not in self._instance:
self._instance[self._cls] = self._cls(*args, **kwargs)
return self._instance[self._cls]
def log_function_time(func):
try:
@functools.wraps(func) def calculate_time(*args, **kw):
began_time = time.time()
callfunc = func(*args, **kw)
end_time = time.time()
log.debug(func.__name__ + " run time = " + str(end_time - began_time))
return callfunc
return calculate_time
except:
log.debug("求取时间无效 因为函数参数不符")
return func
def run_safe_model(module_name):
def inner_run_safe_model(func):
try:
@functools.wraps(func) def run_func(*args, **kw):
callfunc = None
try:
callfunc = func(*args, **kw)
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return callfunc
return run_func
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return func
return inner_run_safe_model
def memoizemethod_noargs(method):
cache = weakref.WeakKeyDictionary()
@functools.wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
def retry(retry_times=3, interval=0):
def _retry(func):
@functools.wraps(func) def wapper(*args, **kwargs):
for i in range(retry_times):
try:
return func(*args, **kwargs)
except Exception as e:
log.error(
"函数 {} 执行失败 重试 {} 次. error {}".format(func.__name__, i + 1, e)
)
time.sleep(interval)
if i + 1 >= retry_times:
raise e
return wapper
return _retry
def retry_asyncio(retry_times=3, interval=0):
def _retry(func):
@functools.wraps(func) async def wapper(*args, **kwargs):
for i in range(retry_times):
try:
return await func(*args, **kwargs)
except Exception as e:
log.error(
"函数 {} 执行失败 重试 {} 次. error {}".format(func.__name__, i + 1, e)
)
await asyncio.sleep(interval)
if i + 1 >= retry_times:
raise e
return wapper
return _retry
def func_timeout(timeout):
def wapper(func):
def handle(
signum, frame
): raise TimeoutError
def new_method(*args, **kwargs):
signal.signal(signal.SIGALRM, handle) signal.alarm(timeout) r = func(*args, **kwargs)
signal.alarm(0) return r
return new_method
return wapper
def get_html_by_requests(
url, headers=None, code="utf-8", data=None, proxies={}, with_response=False
):
html = ""
r = None
try:
if data:
r = requests.post(
url, headers=headers, timeout=TIME_OUT, data=data, proxies=proxies
)
else:
r = requests.get(url, headers=headers, timeout=TIME_OUT, proxies=proxies)
if code:
r.encoding = code
html = r.text
except Exception as e:
log.error(e)
finally:
r and r.close()
if with_response:
return html, r
else:
return html
def get_json_by_requests(
url,
params=None,
headers=None,
data=None,
proxies={},
with_response=False,
cookies=None,
):
json = {}
response = None
try:
if data:
response = requests.post(
url,
headers=headers,
data=data,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
else:
response = requests.get(
url,
headers=headers,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
response.encoding = "utf-8"
json = response.json()
except Exception as e:
log.error(e)
finally:
response and response.close()
if with_response:
return json, response
else:
return json
def get_cookies(response):
cookies = requests.utils.dict_from_cookiejar(response.cookies)
return cookies
def get_cookies_from_str(cookie_str):
cookies = {}
for cookie in cookie_str.split(";"):
cookie = cookie.strip()
if not cookie:
continue
key, value = cookie.split("=", 1)
key = key.strip()
value = value.strip()
cookies[key] = value
return cookies
def get_cookies_jar(cookies):
cookie_jar = RequestsCookieJar()
for cookie in cookies:
cookie_jar.set(cookie["name"], cookie["value"])
return cookie_jar
def get_cookies_from_selenium_cookie(cookies):
cookie_dict = {}
for cookie in cookies:
if cookie.get("name"):
cookie_dict[cookie["name"]] = cookie["value"]
return cookie_dict
def cookiesjar2str(cookies):
str_cookie = ""
for k, v in requests.utils.dict_from_cookiejar(cookies).items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def cookies2str(cookies):
str_cookie = ""
for k, v in cookies.items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def get_urls(
html,
stop_urls=(
"javascript",
"+",
".css",
".js",
".rar",
".xls",
".exe",
".apk",
".doc",
".jpg",
".png",
".flv",
".mp4",
),
):
regex = r'<a.*?href.*?=.*?["|\'](.*?)["|\']'
urls = get_info(html, regex)
urls = sorted(set(urls), key=urls.index)
if stop_urls:
stop_urls = isinstance(stop_urls, str) and [stop_urls] or stop_urls
use_urls = []
for url in urls:
for stop_url in stop_urls:
if stop_url in url:
break
else:
use_urls.append(url)
urls = use_urls
return urls
def get_full_url(root_url, sub_url):
return urljoin(root_url, sub_url)
def joint_url(url, params):
if not params:
return url
params = urlencode(params)
separator = "?" if "?" not in url else "&"
return url + separator + params
def canonicalize_url(url):
return _canonicalize_url(url)
def get_url_md5(url):
url = canonicalize_url(url)
url = re.sub("^http://", "https://", url)
return get_md5(url)
def fit_url(urls, identis):
identis = isinstance(identis, str) and [identis] or identis
fit_urls = []
for link in urls:
for identi in identis:
if identi in link:
fit_urls.append(link)
return list(set(fit_urls))
def get_param(url, key):
match = re.search(f"{key}=([^&]+)", url)
if match:
return match.group(1)
return None
def urlencode(params):
return urllib.parse.urlencode(params)
def urldecode(url):
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key, value = param.split("=")
params_json[key] = unquote_url(value)
return params_json
def unquote_url(url, encoding="utf-8"):
return urllib.parse.unquote(url, encoding=encoding)
def quote_url(url, encoding="utf-8"):
return urllib.parse.quote(url, safe="%;/?:@&=+$,", encoding=encoding)
def quote_chinese_word(text, encoding="utf-8"):
def quote_chinese_word_func(text):
chinese_word = text.group(0)
return urllib.parse.quote(chinese_word, encoding=encoding)
return re.sub("([\u4e00-\u9fa5]+)", quote_chinese_word_func, text, flags=re.S)
def unescape(str):
return html.unescape(str)
def excape(str):
return html.escape(str)
_regexs = {}
def get_info(html, regexs, allow_repeat=True, fetch_one=False, split=None):
regexs = isinstance(regexs, str) and [regexs] or regexs
infos = []
for regex in regexs:
if regex == "":
continue
if regex not in _regexs.keys():
_regexs[regex] = re.compile(regex, re.S)
if fetch_one:
infos = _regexs[regex].search(html)
if infos:
infos = infos.groups()
else:
continue
else:
infos = _regexs[regex].findall(str(html))
if len(infos) > 0:
break
if fetch_one:
infos = infos if infos else ("",)
return infos if len(infos) > 1 else infos[0]
else:
infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
infos = split.join(infos) if split else infos
return infos
def table_json(table, save_one_blank=True):
data = {}
trs = table.xpath(".//tr")
for tr in trs:
tds = tr.xpath("./td|./th")
for i in range(0, len(tds), 2):
if i + 1 > len(tds) - 1:
break
key = tds[i].xpath("string(.)").extract_first(default="").strip()
value = tds[i + 1].xpath("string(.)").extract_first(default="").strip()
value = replace_str(value, "[\f\n\r\t\v]", "")
value = replace_str(value, " +", " " if save_one_blank else "")
if key:
data[key] = value
return data
def get_table_row_data(table):
datas = []
rows = table.xpath(".//tr")
for row in rows:
cols = row.xpath("./td|./th")
row_datas = []
for col in cols:
data = col.xpath("string(.)").extract_first(default="").strip()
row_datas.append(data)
datas.append(row_datas)
return datas
def rows2json(rows, keys=None):
data_start_pos = 0 if keys else 1
datas = []
keys = keys or rows[0]
for values in rows[data_start_pos:]:
datas.append(dict(zip(keys, values)))
return datas
def get_form_data(form):
data = {}
inputs = form.xpath(".//input")
for input in inputs:
name = input.xpath("./@name").extract_first()
value = input.xpath("./@value").extract_first()
if name:
data[name] = value
return data
def get_domain(url):
proto, rest = urllib.parse.splittype(url)
domain, rest = urllib.parse.splithost(rest)
return domain
def get_index_url(url):
return "/".join(url.split("/")[:3])
def get_ip(domain):
ip = socket.getaddrinfo(domain, "http")[0][4][0]
return ip
def get_localhost_ip():
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except:
ip = ""
finally:
if s:
s.close()
return ip
def ip_to_num(ip):
import struct
ip_num = socket.ntohl(struct.unpack("I", socket.inet_aton(str(ip)))[0])
return ip_num
def is_valid_proxy(proxy, check_url=None):
is_valid = False
if check_url:
proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
response = None
try:
response = requests.get(
check_url, headers=headers, proxies=proxies, stream=True, timeout=20
)
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}".format(e, proxy))
finally:
if response:
response.close()
else:
ip, port = proxy.split(":")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sk:
sk.settimeout(7)
try:
sk.connect((ip, int(port))) is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}:{}".format(e, ip, port))
return is_valid
def is_valid_url(url):
if re.match(r"(^https?:/{2}\w.+$)|(ftp://)", url):
return True
else:
return False
def get_text(soup, *args):
try:
return soup.get_text()
except Exception as e:
log.error(e)
return ""
def del_html_tag(content, except_line_break=False, save_img=False, white_replaced=" "):
content = replace_str(content, "(?i)<script(.|\n)*?</script>") content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
content = replace_str(
content, "(?!&[a-z]+=)&[a-z]+;?"
) if except_line_break:
content = content.replace("</p>", "/p")
content = replace_str(content, "<[^p].*?>")
content = content.replace("/p", "</p>")
content = replace_str(content, "[ \f\r\t\v]")
elif save_img:
content = replace_str(content, "(?!<img.+?>)<.+?>") content = replace_str(content, "(?! +)\s+", "\n") content = content.strip()
else:
content = replace_str(content, "<(.|\n)*?>")
content = replace_str(content, "\s+", white_replaced)
content = content.strip()
return content
def del_html_js_css(content):
content = replace_str(content, "(?i)<script(.|\n)*?</script>") content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
return content
def is_have_chinese(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word and True or False
def is_have_english(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words and True or False
def get_chinese_word(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word
def get_english_words(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words or ""
def get_json(json_str):
try:
return json.loads(json_str) if json_str else {}
except Exception as e1:
try:
json_str = json_str.strip()
json_str = json_str.replace("'", '"')
keys = get_info(json_str, "(\w+):")
for key in keys:
json_str = json_str.replace(key, '"%s"' % key)
return json.loads(json_str) if json_str else {}
except Exception as e2:
log.error(
"""
e1: %s
format json_str: %s
e2: %s
"""
% (e1, json_str, e2)
)
return {}
def jsonp2json(jsonp):
try:
return json.loads(re.match(".*?({.*}).*", jsonp, re.S).group(1))
except:
raise ValueError("Invalid Input")
def dumps_json(data, indent=4, sort_keys=False):
try:
if isinstance(data, str):
data = get_json(data)
data = json.dumps(
data,
ensure_ascii=False,
indent=indent,
skipkeys=True,
sort_keys=sort_keys,
default=str,
)
except Exception as e:
data = pformat(data)
return data
def get_json_value(json_object, key):
current_key = ""
value = ""
try:
json_object = (
isinstance(json_object, str) and get_json(json_object) or json_object
)
current_key = key.split(".")[0]
value = json_object[current_key]
key = key[key.find(".") + 1 :]
except Exception as e:
return value
if key == current_key:
return value
else:
return get_json_value(value, key)
def get_all_keys(datas, depth=None, current_depth=0):
keys = []
if depth and current_depth >= depth:
return keys
if isinstance(datas, list):
for data in datas:
keys.extend(get_all_keys(data, depth, current_depth=current_depth + 1))
elif isinstance(datas, dict):
for key, value in datas.items():
keys.append(key)
if isinstance(value, dict):
keys.extend(get_all_keys(value, depth, current_depth=current_depth + 1))
return keys
def to_chinese(unicode_str):
format_str = json.loads('{"chinese":"%s"}' % unicode_str)
return format_str["chinese"]
##################################################
def replace_str(source_str, regex, replace_str=""):
str_info = re.compile(regex)
return str_info.sub(replace_str, source_str)
def del_redundant_blank_character(text):
return re.sub("\s+", " ", text)
##################################################
def get_conf_value(config_file, section, key):
cp = configparser.ConfigParser(allow_no_value=True)
with codecs.open(config_file, "r", encoding="utf-8") as f:
cp.read_file(f)
return cp.get(section, key)
def mkdir(path):
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError as exc: # Python >2.5
pass
def write_file(filename, content, mode="w", encoding="utf-8"):
directory = os.path.dirname(filename)
mkdir(directory)
with open(filename, mode, encoding=encoding) as file:
file.writelines(content)
def read_file(filename, readlines=False, encoding="utf-8"):
content = None
try:
with open(filename, "r", encoding=encoding) as file:
content = file.readlines() if readlines else file.read()
except Exception as e:
log.error(e)
return content
def get_oss_file_list(oss_handler, prefix, date_range_min, date_range_max=None):
# 计算时间范围
date_range_max = date_range_max or date_range_min
date_format = "/".join(
["%Y", "%m", "%d", "%H", "%M", "%S"][: date_range_min.count("/") + 1]
)
time_interval = [
{"days": 365},
{"days": 31},
{"days": 1},
{"hours": 1},
{"minutes": 1},
{"seconds": 1},
][date_range_min.count("/")]
date_range = get_between_date(
date_range_min, date_range_max, date_format=date_format, **time_interval
)
for date in date_range:
file_folder_path = os.path.join(prefix, date)
objs = oss_handler.list(prefix=file_folder_path)
for obj in objs:
filename = obj.key
yield filename
def is_html(url):
if not url:
return False
try:
content_type = request.urlopen(url).info().get("Content-Type", "")
if "text/html" in content_type:
return True
else:
return False
except Exception as e:
log.error(e)
return False
def is_exist(file_path):
return os.path.exists(file_path)
def download_file(url, file_path, *, call_func=None, proxies=None, data=None):
directory = os.path.dirname(file_path)
mkdir(directory)
# 进度条
def progress_callfunc(blocknum, blocksize, totalsize):
percent = 100.0 * blocknum * blocksize / totalsize
if percent > 100:
percent = 100
# print ('进度条 %.2f%%' % percent, end = '\r')
sys.stdout.write("进度条 %.2f%%" % percent + "\r")
sys.stdout.flush()
if url:
try:
if proxies:
# create the object, assign it to a variable
proxy = request.ProxyHandler(proxies)
# construct a new opener using your proxy settings
opener = request.build_opener(proxy)
# install the openen on the module-level
request.install_opener(opener)
request.urlretrieve(url, file_path, progress_callfunc, data)
if callable(call_func):
call_func()
return 1
except Exception as e:
log.error(e)
return 0
else:
return 0
def get_file_list(path, ignore=[]):
templist = path.split("*")
path = templist[0]
file_type = templist[1] if len(templist) >= 2 else ""
# 递归遍历文件
def get_file_list_(path, file_type, ignore, all_file=[]):
file_list = os.listdir(path)
for file_name in file_list:
if file_name in ignore:
continue
file_path = os.path.join(path, file_name)
if os.path.isdir(file_path):
get_file_list_(file_path, file_type, ignore, all_file)
else:
if not file_type or file_name.endswith(file_type):
all_file.append(file_path)
return all_file
return get_file_list_(path, file_type, ignore) if os.path.isdir(path) else [path]
def rename_file(old_name, new_name):
os.rename(old_name, new_name)
def del_file(path, ignore=()):
files = get_file_list(path, ignore)
for file in files:
try:
os.remove(file)
except Exception as e:
log.error(
"""
删除出错: %s
Exception : %s
"""
% (file, str(e))
)
finally:
pass
def get_file_type(file_name):
try:
return os.path.splitext(file_name)[1]
except Exception as e:
log.exception(e)
def get_file_path(file_path):
try:
return os.path.split(file_path)[0]
except Exception as e:
log.exception(e)
#############################################
def exec_js(js_code):
return execjs.eval(js_code)
def compile_js(js_func):
ctx = execjs.compile(js_func)
return ctx.call
###############################################
#############################################
def date_to_timestamp(date, time_format="%Y-%m-%d %H:%M:%S"):
timestamp = time.mktime(time.strptime(date, time_format))
return int(timestamp)
def timestamp_to_date(timestamp, time_format="%Y-%m-%d %H:%M:%S"):
if timestamp is None:
raise ValueError("timestamp is null")
date = time.localtime(timestamp)
return time.strftime(time_format, date)
def get_current_timestamp():
return int(time.time())
def get_current_date(date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.now().strftime(date_format)
# return time.strftime(date_format, time.localtime(time.time()))
def get_date_number(year=None, month=None, day=None):
if year and month and day:
return datetime.date(year, month, day).isocalendar()
elif not any([year, month, day]):
return datetime.datetime.now().isocalendar()
else:
assert year, "year 不能为空"
assert month, "month 不能为空"
assert day, "day 不能为空"
def get_between_date(
begin_date, end_date=None, date_format="%Y-%m-%d", **time_interval
):
date_list = []
begin_date = datetime.datetime.strptime(begin_date, date_format)
end_date = (
datetime.datetime.strptime(end_date, date_format)
if end_date
else datetime.datetime.strptime(
time.strftime(date_format, time.localtime(time.time())), date_format
)
)
time_interval = time_interval or dict(days=1)
while begin_date <= end_date:
date_str = begin_date.strftime(date_format)
date_list.append(date_str)
begin_date += datetime.timedelta(**time_interval)
if end_date.strftime(date_format) not in date_list:
date_list.append(end_date.strftime(date_format))
return date_list
def get_between_months(begin_date, end_date=None):
def add_months(dt, months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year=year, month=month, day=day)
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = (
datetime.datetime.strptime(end_date, "%Y-%m-%d")
if end_date
else datetime.datetime.strptime(
time.strftime("%Y-%m-%d", time.localtime(time.time())), "%Y-%m-%d"
)
)
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m")
date_list.append(date_str)
begin_date = add_months(begin_date, 1)
return date_list
def get_today_of_day(day_offset=0):
return str(datetime.date.today() + datetime.timedelta(days=day_offset))
def get_days_of_month(year, month):
return calendar.monthrange(year, month)[1]
def get_firstday_of_month(date):
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = "01"
if int(month) < 10:
month = "0" + str(int(month))
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_lastday_of_month(date):
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = calendar.monthrange(year, month)[1]
month = add_zero(month)
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_firstday_month(month_offset=0):
(y, m, d) = get_year_month_and_days(month_offset)
d = "01"
arr = (y, m, d)
return "-".join("%s" % i for i in arr)
def get_lastday_month(month_offset=0):
return "-".join("%s" % i for i in get_year_month_and_days(month_offset))
def get_last_month(month_offset=0):
return "-".join("%s" % i for i in get_year_month_and_days(month_offset)[:2])
def get_year_month_and_days(month_offset=0):
today = datetime.datetime.now()
year, month = today.year, today.month
this_year = int(year)
this_month = int(month)
total_month = this_month + month_offset
if month_offset >= 0:
if total_month <= 12:
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
else:
if (total_month > 0) and (total_month < 12):
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
def add_zero(n):
return "%02d" % n
def get_month(month_offset=0):
today = datetime.datetime.now()
day = add_zero(today.day)
(y, m, d) = get_year_month_and_days(month_offset)
arr = (y, m, d)
if int(day) < int(d):
arr = (y, m, day)
return "-".join("%s" % i for i in arr)
@run_safe_model("format_date")
def format_date(date, old_format="", new_format="%Y-%m-%d %H:%M:%S"):
if not date:
return ""
if not old_format:
regex = "(\d+)"
numbers = get_info(date, regex, allow_repeat=True)
formats = ["%Y", "%m", "%d", "%H", "%M", "%S"]
old_format = date
for i, number in enumerate(numbers[:6]):
if i == 0 and len(number) == 2: # 年份可能是两位 用小%y
old_format = old_format.replace(
number, formats[i].lower(), 1
) # 替换一次 '2017年11月30日 11:49' 防止替换11月时,替换11小时
else:
old_format = old_format.replace(number, formats[i], 1) # 替换一次
try:
date_obj = datetime.datetime.strptime(date, old_format)
if "T" in date and "Z" in date:
date_obj += datetime.timedelta(hours=8)
date_str = date_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
date_str = datetime.datetime.strftime(date_obj, new_format)
except Exception as e:
log.error("日期格式化出错,old_format = %s 不符合 %s 格式" % (old_format, date))
date_str = date
return date_str
def transform_lower_num(data_str: str):
num_map = {
"一": "1",
"二": "2",
"两": "2",
"三": "3",
"四": "4",
"五": "5",
"六": "6",
"七": "7",
"八": "8",
"九": "9",
"十": "0",
}
pattern = f'[{"|".join(num_map.keys())}|零]'
res = re.search(pattern, data_str)
if not res:
# 如果字符串中没有包含中文数字 不做处理 直接返回
return data_str
data_str = data_str.replace("0", "零")
for n in num_map:
data_str = data_str.replace(n, num_map[n])
re_data_str = re.findall("\d+", data_str)
for i in re_data_str:
if len(i) == 3:
new_i = i.replace("0", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 4:
new_i = i.replace("10", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 2 and int(i) < 10:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
elif len(i) == 1 and int(i) == 0:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
return data_str.replace("零", "0")
@run_safe_model("format_time")
def format_time(release_time, date_format="%Y-%m-%d %H:%M:%S"):
release_time = transform_lower_num(release_time)
release_time = release_time.replace("日", "天").replace("/", "-")
if "年前" in release_time:
years = re.compile("(\d+)\s*年前").findall(release_time)
years_ago = datetime.datetime.now() - datetime.timedelta(
days=int(years[0]) * 365
)
release_time = years_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "月前" in release_time:
months = re.compile("(\d+)[\s个]*月前").findall(release_time)
months_ago = datetime.datetime.now() - datetime.timedelta(
days=int(months[0]) * 30
)
release_time = months_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "周前" in release_time:
weeks = re.compile("(\d+)\s*周前").findall(release_time)
weeks_ago = datetime.datetime.now() - datetime.timedelta(days=int(weeks[0]) * 7)
release_time = weeks_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "天前" in release_time:
ndays = re.compile("(\d+)\s*天前").findall(release_time)
days_ago = datetime.datetime.now() - datetime.timedelta(days=int(ndays[0]))
release_time = days_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "小时前" in release_time:
nhours = re.compile("(\d+)\s*小时前").findall(release_time)
hours_ago = datetime.datetime.now() - datetime.timedelta(hours=int(nhours[0]))
release_time = hours_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "分钟前" in release_time:
nminutes = re.compile("(\d+)\s*分钟前").findall(release_time)
minutes_ago = datetime.datetime.now() - datetime.timedelta(
minutes=int(nminutes[0])
)
release_time = minutes_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "前天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=2)
release_time = release_time.replace("前天", str(yesterday))
elif "昨天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
release_time = release_time.replace("昨天", str(yesterday))
elif "今天" in release_time:
release_time = release_time.replace("今天", get_current_date("%Y-%m-%d"))
elif "刚刚" in release_time:
release_time = get_current_date()
elif re.search("^\d\d:\d\d", release_time):
release_time = get_current_date("%Y-%m-%d") + " " + release_time
elif not re.compile("\d{4}").findall(release_time):
month = re.compile("\d{1,2}").findall(release_time)
if month and int(month[0]) <= int(get_current_date("%m")):
release_time = get_current_date("%Y") + "-" + release_time
else:
release_time = str(int(get_current_date("%Y")) - 1) + "-" + release_time
    # split apart a day and an hour that were run together
template = re.compile("(\d{4}-\d{1,2}-\d{2})(\d{1,2})")
release_time = re.sub(template, r"\1 \2", release_time)
release_time = format_date(release_time, new_format=date_format)
return release_time
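# A minimal usage sketch; outputs depend on the current clock, so the values below
# are only indicative:
#
#   format_time("三天前")       # -> e.g. "2021-06-07 10:00:00" (timestamp three days ago)
#   format_time("昨天 08:30")   # -> e.g. "2021-06-09 08:30:00" (yesterday's date plus the given time)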
def to_date(date_str, date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(date_str, date_format)
def get_before_date(
current_date,
days,
current_date_format="%Y-%m-%d %H:%M:%S",
return_date_format="%Y-%m-%d %H:%M:%S",
):
current_date = to_date(current_date, current_date_format)
date_obj = current_date + datetime.timedelta(days=days)
return datetime.datetime.strftime(date_obj, return_date_format)
def delay_time(sleep_time=60):
time.sleep(sleep_time)
def format_seconds(seconds):
    seconds = int(seconds + 0.5)  # round to the nearest whole second
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = ""
if d:
times += "{}天".format(d)
if h:
times += "{}小时".format(h)
if m:
times += "{}分".format(m)
if s:
times += "{}秒".format(s)
return times
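# A minimal usage sketch, hand-checked against the divmod chain above:
#
#   format_seconds(3661)    # -> "1小时1分1秒"
#   format_seconds(90061)   # -> "1天1小时1分1秒"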
################################################
def get_md5(*args):
m = hashlib.md5()
for arg in args:
m.update(str(arg).encode())
return m.hexdigest()
def get_sha1(*args):
sha1 = hashlib.sha1()
for arg in args:
sha1.update(str(arg).encode())
    return sha1.hexdigest()  # 40 hex characters
def get_base64(data):
if data is None:
return data
return base64.b64encode(str(data).encode()).decode("utf8")
def get_uuid(key1="", key2=""):
uuid_object = ""
if not key1 and not key2:
uuid_object = uuid.uuid1()
else:
hash = md5(bytes(key1, "utf-8") + bytes(key2, "utf-8")).digest()
uuid_object = uuid.UUID(bytes=hash[:16], version=3)
return str(uuid_object)
def get_hash(text):
return hash(text)
##################################################
def cut_string(text, length):
text_list = re.findall(".{%d}" % length, text, re.S)
leave_text = text[len(text_list) * length :]
if leave_text:
text_list.append(leave_text)
return text_list
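# A minimal usage sketch:
#
#   cut_string("abcdefg", 3)   # -> ["abc", "def", "g"]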
def get_random_string(length=1):
random_string = "".join(random.sample(string.ascii_letters + string.digits, length))
return random_string
def get_random_password(length=8, special_characters=""):
while True:
random_password = "".join(
random.sample(
string.ascii_letters + string.digits + special_characters, length
)
)
if (
re.search("[0-9]", random_password)
and re.search("[A-Z]", random_password)
and re.search("[a-z]", random_password)
):
if not special_characters:
break
elif set(random_password).intersection(special_characters):
break
return random_password
def get_random_email(length=None, email_types: list = None, special_characters=""):
if not length:
length = random.randint(4, 12)
if not email_types:
email_types = [
"qq.com",
"163.com",
"gmail.com",
"yahoo.com",
"hotmail.com",
"yeah.net",
"126.com",
"139.com",
"sohu.com",
]
email_body = get_random_password(length, special_characters)
email_type = random.choice(email_types)
email = email_body + "@" + email_type
return email
#################################
def dumps_obj(obj):
return pickle.dumps(obj)
def loads_obj(obj_str):
return pickle.loads(obj_str)
def get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
log.error("Method %r not found in: %s" % (name, obj))
return None
def witch_workspace(project_path):
    os.chdir(project_path)  # switch the working directory
############### database helpers #######################
def format_sql_value(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, (list, dict)):
value = dumps_json(value, indent=None)
elif isinstance(value, (datetime.date, datetime.time)):
value = str(value)
elif isinstance(value, bool):
value = int(value)
return value
def list2str(datas):
data_str = str(tuple(datas))
data_str = re.sub(",\)$", ")", data_str)
return data_str
def make_insert_sql(
table, data, auto_update=False, update_columns=(), insert_ignore=False
):
keys = ["`{}`".format(key) for key in data.keys()]
keys = list2str(keys).replace("'", "")
values = [format_sql_value(value) for value in data.values()]
values = list2str(values)
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
update_columns_ = ", ".join(
["{key}=values({key})".format(key=key) for key in update_columns]
)
sql = (
"insert%s into `{table}` {keys} values {values} on duplicate key update %s"
% (" ignore" if insert_ignore else "", update_columns_)
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values}"
else:
sql = "insert%s into `{table}` {keys} values {values}" % (
" ignore" if insert_ignore else ""
)
sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
return sql
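# A minimal usage sketch of the generated SQL; values are embedded directly into the
# statement, so this helper assumes trusted, pre-cleaned data:
#
#   make_insert_sql("user", {"id": 1, "name": "bob"})
#   # -> "insert into `user` (`id`, `name`) values (1, 'bob')"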
def make_update_sql(table, data, condition):
key_values = []
for key, value in data.items():
value = format_sql_value(value)
if isinstance(value, str):
key_values.append("`{}`={}".format(key, repr(value)))
elif value is None:
key_values.append("`{}`={}".format(key, "null"))
else:
key_values.append("`{}`={}".format(key, value))
key_values = ", ".join(key_values)
sql = "update `{table}` set {key_values} where {condition}"
sql = sql.format(table=table, key_values=key_values, condition=condition)
return sql
def make_batch_sql(
table, datas, auto_update=False, update_columns=(), update_columns_value=()
):
if not datas:
return
keys = list(datas[0].keys())
values_placeholder = ["%s"] * len(keys)
values = []
for data in datas:
value = []
for key in keys:
current_data = data.get(key)
current_data = format_sql_value(current_data)
value.append(current_data)
values.append(value)
keys = ["`{}`".format(key) for key in keys]
keys = list2str(keys).replace("'", "")
values_placeholder = list2str(values_placeholder).replace("'", "")
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
if update_columns_value:
update_columns_ = ", ".join(
[
"`{key}`={value}".format(key=key, value=value)
for key, value in zip(update_columns, update_columns_value)
]
)
else:
update_columns_ = ", ".join(
["`{key}`=values(`{key}`)".format(key=key) for key in update_columns]
)
sql = "insert into `{table}` {keys} values {values_placeholder} on duplicate key update {update_columns}".format(
table=table,
keys=keys,
values_placeholder=values_placeholder,
update_columns=update_columns_,
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
else:
sql = "insert ignore into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
return sql, values
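# A minimal usage sketch: make_batch_sql returns a parameterised statement plus the
# row values, suitable for cursor.executemany():
#
#   sql, values = make_batch_sql("user", [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}])
#   # sql    -> "insert ignore into `user` (`id`, `name`) values (%s, %s)"
#   # values -> [[1, "a"], [2, "b"]]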
############### JSON helpers #######################
def key2underline(key: str, strict=True):
regex = "[A-Z]*" if not strict else "[A-Z]"
capitals = re.findall(regex, key)
if capitals:
for capital in capitals:
if not capital:
continue
if key.startswith(capital):
if len(capital) > 1:
key = key.replace(
capital, capital[:-1].lower() + "_" + capital[-1].lower(), 1
)
else:
key = key.replace(capital, capital.lower(), 1)
else:
if len(capital) > 1:
key = key.replace(capital, "_" + capital.lower() + "_", 1)
else:
key = key.replace(capital, "_" + capital.lower(), 1)
return key.strip("_")
def key2hump(key):
return key.title().replace("_", "")
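# A minimal usage sketch of the two key-style converters:
#
#   key2underline("HelloWorld")   # -> "hello_world"
#   key2hump("hello_world")       # -> "HelloWorld"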
def format_json_key(json_data):
json_data_correct = {}
for key, value in json_data.items():
key = key2underline(key)
json_data_correct[key] = value
return json_data_correct
def quick_to_json(text):
contents = text.split("\n")
json = {}
for content in contents:
if content == "\n":
continue
content = content.strip()
regex = ["(:?.*?):(.*)", "(.*?):? +(.*)", "([^:]*)"]
result = get_info(content, regex)
result = result[0] if isinstance(result[0], tuple) else result
try:
json[result[0]] = eval(result[1].strip())
except:
json[result[0]] = result[1].strip()
return json
##############################
def print_pretty(object):
pprint(object)
def print_params2json(url):
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
params_json[key_value[0]] = key_value[1]
print(dumps_json(params_json))
def print_cookie2json(cookie_str_or_list):
if isinstance(cookie_str_or_list, str):
cookie_json = {}
cookies = cookie_str_or_list.split("; ")
for cookie in cookies:
name, value = cookie.split("=")
cookie_json[name] = value
else:
cookie_json = get_cookies_from_selenium_cookie(cookie_str_or_list)
print(dumps_json(cookie_json))
###############################
def flatten(x):
return list(iflatten(x))
def iflatten(x):
for el in x:
if _is_listlike(el):
for el_ in flatten(el):
yield el_
else:
yield el
def _is_listlike(x):
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
###################
def re_def_supper_class(obj, supper_class):
obj.__bases__ = (supper_class,)
###################
freq_limit_record = {}
def reach_freq_limit(rate_limit, *key):
if rate_limit == 0:
return False
msg_md5 = get_md5(*key)
key = "rate_limit:{}".format(msg_md5)
try:
if get_redisdb().strget(key):
return True
get_redisdb().strset(key, time.time(), ex=rate_limit)
except redis.exceptions.ConnectionError as e:
        # fall back to in-memory rate limiting
global freq_limit_record
if key not in freq_limit_record:
freq_limit_record[key] = time.time()
return False
if time.time() - freq_limit_record.get(key) < rate_limit:
return True
else:
freq_limit_record[key] = time.time()
return False
def dingding_warning(
message, message_prefix=None, rate_limit=None, url=None, user_phone=None
):
    # re-read settings so the latest configuration is used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.DINGDING_WARNING_URL
user_phone = user_phone or setting.DINGDING_WARNING_PHONE
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
data = {
"msgtype": "text",
"text": {"content": message},
"at": {"atMobiles": user_phone, "isAtAll": setting.DINGDING_WARNING_ALL},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def email_warning(
message,
title,
message_prefix=None,
email_sender=None,
email_password=None,
email_receiver=None,
email_smtpserver=None,
rate_limit=None,
):
    # re-read settings so the latest configuration is used
email_sender = email_sender or setting.EMAIL_SENDER
email_password = email_password or setting.EMAIL_PASSWORD
email_receiver = email_receiver or setting.EMAIL_RECEIVER
email_smtpserver = email_smtpserver or setting.EMAIL_SMTPSERVER
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
if not all([message, email_sender, email_password, email_receiver]):
return
if reach_freq_limit(
rate_limit, email_receiver, email_sender, message_prefix or message
):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(email_receiver, str):
email_receiver = [email_receiver]
with EmailSender(
username=email_sender, password=email_password, smtpserver=email_smtpserver
) as email:
return email.send(receivers=email_receiver, title=title, content=message)
def linkedsee_warning(message, rate_limit=3600, message_prefix=None, token=None):
if not token:
log.info("未设置灵犀token,不支持报警")
return
if reach_freq_limit(rate_limit, token, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
headers = {"servicetoken": token, "Content-Type": "application/json"}
url = "http://www.linkedsee.com/alarm/zabbix"
data = {"content": message}
response = requests.post(url, data=json.dumps(data), headers=headers)
return response
def wechat_warning(
message,
message_prefix=None,
rate_limit=None,
url=None,
user_phone=None,
all_users: bool = None,
):
    # re-read settings so the latest configuration is used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.WECHAT_WARNING_URL
user_phone = user_phone or setting.WECHAT_WARNING_PHONE
all_users = all_users if all_users is not None else setting.WECHAT_WARNING_ALL
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
if all_users is True or not user_phone:
user_phone = ["@all"]
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
data = {
"msgtype": "text",
"text": {"content": message, "mentioned_mobile_list": user_phone},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def feishu_warning(message, message_prefix=None, rate_limit=None, url=None, user=None):
    # re-read settings so the latest configuration is used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.FEISHU_WARNING_URL
user = user or setting.FEISHU_WARNING_USER
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(user, dict):
user = [user] if user else []
at = ""
if setting.FEISHU_WARNING_ALL:
at = '<at user_id="all">所有人</at>'
elif user:
at = " ".join(
[f'<at user_id="{u.get("open_id")}">{u.get("name")}</at>' for u in user]
)
data = {"msg_type": "text", "content": {"text": at + message}}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("StatusCode") == 0:
return True
else:
raise Exception(result.get("msg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def send_msg(msg, level="DEBUG", message_prefix=""):
if setting.WARNING_LEVEL == "ERROR":
if level.upper() != "ERROR":
return
if setting.DINGDING_WARNING_URL:
keyword = "feapder报警系统\n"
dingding_warning(keyword + msg, message_prefix=message_prefix)
if setting.EMAIL_RECEIVER:
title = message_prefix or msg
if len(title) > 50:
title = title[:50] + "..."
email_warning(msg, message_prefix=message_prefix, title=title)
if setting.WECHAT_WARNING_URL:
keyword = "feapder报警系统\n"
wechat_warning(keyword + msg, message_prefix=message_prefix)
if setting.FEISHU_WARNING_URL:
keyword = "feapder报警系统\n"
feishu_warning(keyword + msg, message_prefix=message_prefix)
###################
def make_item(cls, data: dict):
item = cls()
for key, val in data.items():
setattr(item, key, val)
return item
###################
def aio_wrap(loop=None, executor=None):
outer_loop = loop
outer_executor = executor
def wrap(fn):
@wraps(fn)
async def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
if outer_loop is None:
loop = asyncio.get_event_loop()
else:
loop = outer_loop
if executor is None:
executor = outer_executor
pfunc = partial(fn, *args, **kwargs)
return await loop.run_in_executor(executor, pfunc)
return run
return wrap
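# A minimal usage sketch: aio_wrap turns a blocking function into an awaitable by
# running it on an executor. `fetch_page` is a hypothetical caller-supplied function.
#
#   @aio_wrap()
#   def fetch_page(url):
#       return requests.get(url).text
#
#   # inside a coroutine:
#   #   html = await fetch_page("https://example.com")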
######### number ##########
def ensure_int(n):
if not n:
return 0
return int(n)
def ensure_float(n):
if not n:
return 0.0
return float(n)
| true | true |
1c4aeeff8024ba7316b92e3705770ea7212291c0 | 2,664 | py | Python | folklore/log.py | maralla/folklore | 851aded3db130a84d3a9c1bb581cdaad12e5b5a1 | [
"MIT"
] | null | null | null | folklore/log.py | maralla/folklore | 851aded3db130a84d3a9c1bb581cdaad12e5b5a1 | [
"MIT"
] | null | null | null | folklore/log.py | maralla/folklore | 851aded3db130a84d3a9c1bb581cdaad12e5b5a1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
folklore.log
~~~~~~~~~~~~
This module implements log configuration.
Hook definition:
- init_process Config logs
"""
import sys
import logging
import logging.config
from copy import deepcopy
from .hook import define_hook
CONF = {
'version': 1,
'disable_existing_loggers': False,
'root': None,
'loggers': {},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console',
}
},
'formatters': {
'console': {
'format': ('%(asctime)s %(levelname)-7s '
'%(name)s[%(process)d] %(message)s'),
},
'syslog': {
'format': '%(name)s[%(process)d]: %(message)s',
},
},
}
SYSLOG_HANDLER = {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': 'local6',
'formatter': 'syslog',
}
def _logger(handlers, level='INFO', propagate=True):
return {
'handlers': handlers,
'propagate': propagate,
'level': level
}
def _console(name):
conf = deepcopy(CONF)
conf['root'] = _logger(['console'])
conf['loggers'][name] = _logger(['console'], propagate=False)
return conf
def _syslog(name):
conf = deepcopy(CONF)
conf['root'] = _logger(['syslog'])
conf['loggers'][name] = _logger(['syslog'], propagate=False)
conf['handlers']['syslog'] = SYSLOG_HANDLER
return conf
@define_hook(event='after_load')
def config_log():
"""Config log according to app name and environment.
"""
from folklore_config import config
name = config.app_name
env = config.env
if env == 'dev' or sys.platform == 'darwin' or config.syslog_disabled:
conf = _console(name)
else:
conf = _syslog(name)
logging.config.dictConfig(conf)
class MetaAdapter(logging.LoggerAdapter):
"""Add meta to logging message
meta format: [{client_name}/{client_version} {client_addr} {extra}]
    missing components are filled with '-'
"""
def process(self, msg, kwargs):
if 'ctx' not in self.extra:
return super(MetaAdapter, self).process(msg, kwargs)
ctx = self.extra['ctx']
meta = ctx.get('meta', {})
components = [
'/'.join((meta.get('client_name', '-'),
meta.get('client_version', '-'))),
ctx.get('client_addr', '-'),
]
log_extra = ctx.get('log_extra')
if log_extra:
components.append(log_extra)
return '[{}] {}'.format(' '.join(components), msg), kwargs
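# A minimal usage sketch (the field names mirror the keys read in process() above):
#
#   logger = MetaAdapter(logging.getLogger(__name__),
#                        {'ctx': {'meta': {'client_name': 'svc', 'client_version': '1.0'},
#                                 'client_addr': '10.0.0.1'}})
#   logger.info('hello')   # logs: "[svc/1.0 10.0.0.1] hello"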
| 23.575221 | 74 | 0.563438 |
import sys
import logging
import logging.config
from copy import deepcopy
from .hook import define_hook
CONF = {
'version': 1,
'disable_existing_loggers': False,
'root': None,
'loggers': {},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console',
}
},
'formatters': {
'console': {
'format': ('%(asctime)s %(levelname)-7s '
'%(name)s[%(process)d] %(message)s'),
},
'syslog': {
'format': '%(name)s[%(process)d]: %(message)s',
},
},
}
SYSLOG_HANDLER = {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': 'local6',
'formatter': 'syslog',
}
def _logger(handlers, level='INFO', propagate=True):
return {
'handlers': handlers,
'propagate': propagate,
'level': level
}
def _console(name):
conf = deepcopy(CONF)
conf['root'] = _logger(['console'])
conf['loggers'][name] = _logger(['console'], propagate=False)
return conf
def _syslog(name):
conf = deepcopy(CONF)
conf['root'] = _logger(['syslog'])
conf['loggers'][name] = _logger(['syslog'], propagate=False)
conf['handlers']['syslog'] = SYSLOG_HANDLER
return conf
@define_hook(event='after_load')
def config_log():
from folklore_config import config
name = config.app_name
env = config.env
if env == 'dev' or sys.platform == 'darwin' or config.syslog_disabled:
conf = _console(name)
else:
conf = _syslog(name)
logging.config.dictConfig(conf)
class MetaAdapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
if 'ctx' not in self.extra:
return super(MetaAdapter, self).process(msg, kwargs)
ctx = self.extra['ctx']
meta = ctx.get('meta', {})
components = [
'/'.join((meta.get('client_name', '-'),
meta.get('client_version', '-'))),
ctx.get('client_addr', '-'),
]
log_extra = ctx.get('log_extra')
if log_extra:
components.append(log_extra)
return '[{}] {}'.format(' '.join(components), msg), kwargs
| true | true |
1c4aef1e83c2ef8ee4a5a277c50748c34c3b41ea | 6,388 | py | Python | casemgmt/models.py | devmonkey22/oso-casemgmt-django | 2a5d455015394f95716ba6c62daf330fbdb6fd6a | [
"MIT"
] | 3 | 2020-12-18T13:52:16.000Z | 2021-02-17T17:05:28.000Z | casemgmt/models.py | devmonkey22/oso-casemgmt-django | 2a5d455015394f95716ba6c62daf330fbdb6fd6a | [
"MIT"
] | 1 | 2021-04-13T18:58:17.000Z | 2021-04-13T18:58:17.000Z | casemgmt/models.py | devmonkey22/oso-casemgmt-django | 2a5d455015394f95716ba6c62daf330fbdb6fd6a | [
"MIT"
] | 2 | 2020-12-21T15:10:29.000Z | 2021-02-17T19:22:05.000Z | from django.contrib.auth.models import AbstractUser, Group, Permission, GroupManager
from django.db import models
from django_oso.models import AuthorizedModel
## MODELS ##
class User(AbstractUser):
"""
System users as Case Workers, Supervisors, Auditors, etc.
"""
# basic info
email = models.CharField(max_length=256)
class Role(models.Model):
"""
Job role that has associated permissions. Modeled using ``auth.Group`` model, but separated to prevent assigning
users directly to the role (group) globally. Users can be assigned to this role through scoped models like ``CaseloadRole``.
"""
name = models.CharField(max_length=150, unique=True)
permissions = models.ManyToManyField(
Permission,
blank=True,
)
objects = GroupManager()
class Meta:
verbose_name = 'Role'
verbose_name_plural = 'Roles'
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class Client(AuthorizedModel):
"""
Client/Customer
"""
first_name = models.CharField(max_length=256)
last_name = models.CharField(max_length=256)
def __str__(self):
return f"{self.first_name} {self.last_name}"
class CaseType(AuthorizedModel):
"""
Case (Program) Type - for example, Medical, Unemployment, WorkersCompensation
"""
code = models.CharField(max_length=5, unique=True)
name = models.CharField(max_length=256)
def __str__(self):
return f"{self.name} ({self.code})"
class DocumentTemplate(AuthorizedModel):
"""
Document Template used for Documents (of a specific CaseType)
"""
code = models.CharField(max_length=10)
name = models.CharField(max_length=256)
case_type = models.ForeignKey(CaseType, related_name="document_templates", on_delete=models.CASCADE)
filename = models.CharField(max_length=1024)
class Meta:
constraints = [
models.UniqueConstraint(fields=["code", "case_type"], name="code_case_type")
]
def __str__(self):
return f"{self.name} ({self.case_type.code})"
class Document(AuthorizedModel):
"""
Individual document instances for a client based on templates.
Documents are indirectly linked to CaseTypes through its `template.case_type` field.
"""
name = models.CharField(max_length=256)
client = models.ForeignKey(Client, related_name="documents", on_delete=models.CASCADE)
template = models.ForeignKey(DocumentTemplate, related_name="documents", on_delete=models.CASCADE)
# Just as an example to store data for document. Normally would be stored in related DB models, NoSQL, etc.
content = models.TextField(null=True)
# time info
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.name} for {self.client} ({self.template.case_type.code})"
class DocumentActivityLog(AuthorizedModel):
"""
Activity log records for related documents
"""
VERB_VIEWED = 'viewed'
VERB_CREATED = 'created'
VERB_UPDATED = 'updated'
VERB_DELETED = 'deleted'
VERB_SHARED = 'shared'
VERB_MAILED = 'mailed'
LOG_VERB_CHOICES = [
(VERB_VIEWED, 'Viewed document'),
(VERB_CREATED, 'Created document'),
(VERB_UPDATED, 'Updated document'),
(VERB_DELETED, 'Deleted document'),
(VERB_SHARED, 'Shared document'),
(VERB_MAILED, 'Mailed document'),
]
document = models.ForeignKey(Document, related_name="activities", on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
actor = models.ForeignKey(User, related_name="document_activities", null=True, on_delete=models.SET_NULL)
verb = models.CharField(max_length=10, choices=LOG_VERB_CHOICES)
description = models.CharField(max_length=255, null=True)
def __str__(self):
return f"{self.actor.username} {self.verb} '{self.document}': {self.description}"
class Meta:
ordering = ("date", "id")
class Caseload(AuthorizedModel):
"""
A caseload is a set of clients and set of casetypes.
A client may have related records to multiple different case types, not all of which may be part of this caseload.
"""
name = models.CharField(max_length=1024)
# many-to-many relationship with clients
clients = models.ManyToManyField(Client, related_name="caseloads")
# many-to-many relationship with CaseTypes
case_types = models.ManyToManyField(CaseType, related_name="caseloads")
def __str__(self):
return f"{self.name}"
## ROLE MODELS ##
class CaseloadRole(AuthorizedModel):
"""
A caseload role is a role assignment from a user/group to a caseload.
"""
caseload = models.ForeignKey(Caseload, related_name="caseload_roles", on_delete=models.CASCADE)
# Role (auth group) with permissions (not just a role name)
# For simplicity, auth.Group is being overloaded as a Role with permissions assigned (but in theory, no global members)
# For larger systems, it is recommended to create an explicit `Role` model with permissions, admin interface, etc.
role = models.ForeignKey(Role, related_name="caseload_roles", on_delete=models.CASCADE)
# many-to-many relationship with users
user = models.ForeignKey(User, blank=True, null=True, related_name="caseload_roles", on_delete=models.CASCADE)
# many-to-many relationship with groups (groups can be teams of CaseWorkers, rather than explicit `Team` model)
group = models.ForeignKey(Group, blank=True, null=True, related_name="caseload_roles", on_delete=models.CASCADE)
def __str__(self):
return f"{self.role.name} on {self.caseload}"
## Case-specific Data Models related to Documents ##
class WkcmpEligibilityData(AuthorizedModel):
"""
Data specific to Eligibility Form (Document) records.
Provides example of authorizing access based on related model (Document) and all its policies, plus any specific
policies for ourselves too.
"""
document = models.ForeignKey(Document, related_name="wkcmp_eligibility", on_delete=models.CASCADE)
current_monthly_income = models.DecimalField(max_digits=8, decimal_places=2)
employer = models.CharField(max_length=100)
num_dependents = models.PositiveSmallIntegerField()
| 32.758974 | 129 | 0.705855 | from django.contrib.auth.models import AbstractUser, Group, Permission, GroupManager
from django.db import models
from django_oso.models import AuthorizedModel
class User(AbstractUser):
email = models.CharField(max_length=256)
class Role(models.Model):
name = models.CharField(max_length=150, unique=True)
permissions = models.ManyToManyField(
Permission,
blank=True,
)
objects = GroupManager()
class Meta:
verbose_name = 'Role'
verbose_name_plural = 'Roles'
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class Client(AuthorizedModel):
first_name = models.CharField(max_length=256)
last_name = models.CharField(max_length=256)
def __str__(self):
return f"{self.first_name} {self.last_name}"
class CaseType(AuthorizedModel):
code = models.CharField(max_length=5, unique=True)
name = models.CharField(max_length=256)
def __str__(self):
return f"{self.name} ({self.code})"
class DocumentTemplate(AuthorizedModel):
code = models.CharField(max_length=10)
name = models.CharField(max_length=256)
case_type = models.ForeignKey(CaseType, related_name="document_templates", on_delete=models.CASCADE)
filename = models.CharField(max_length=1024)
class Meta:
constraints = [
models.UniqueConstraint(fields=["code", "case_type"], name="code_case_type")
]
def __str__(self):
return f"{self.name} ({self.case_type.code})"
class Document(AuthorizedModel):
name = models.CharField(max_length=256)
client = models.ForeignKey(Client, related_name="documents", on_delete=models.CASCADE)
template = models.ForeignKey(DocumentTemplate, related_name="documents", on_delete=models.CASCADE)
content = models.TextField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.name} for {self.client} ({self.template.case_type.code})"
class DocumentActivityLog(AuthorizedModel):
VERB_VIEWED = 'viewed'
VERB_CREATED = 'created'
VERB_UPDATED = 'updated'
VERB_DELETED = 'deleted'
VERB_SHARED = 'shared'
VERB_MAILED = 'mailed'
LOG_VERB_CHOICES = [
(VERB_VIEWED, 'Viewed document'),
(VERB_CREATED, 'Created document'),
(VERB_UPDATED, 'Updated document'),
(VERB_DELETED, 'Deleted document'),
(VERB_SHARED, 'Shared document'),
(VERB_MAILED, 'Mailed document'),
]
document = models.ForeignKey(Document, related_name="activities", on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
actor = models.ForeignKey(User, related_name="document_activities", null=True, on_delete=models.SET_NULL)
verb = models.CharField(max_length=10, choices=LOG_VERB_CHOICES)
description = models.CharField(max_length=255, null=True)
def __str__(self):
return f"{self.actor.username} {self.verb} '{self.document}': {self.description}"
class Meta:
ordering = ("date", "id")
class Caseload(AuthorizedModel):
name = models.CharField(max_length=1024)
clients = models.ManyToManyField(Client, related_name="caseloads")
case_types = models.ManyToManyField(CaseType, related_name="caseloads")
def __str__(self):
return f"{self.name}"
class CaseloadRole(AuthorizedModel):
caseload = models.ForeignKey(Caseload, related_name="caseload_roles", on_delete=models.CASCADE)
role = models.ForeignKey(Role, related_name="caseload_roles", on_delete=models.CASCADE)
user = models.ForeignKey(User, blank=True, null=True, related_name="caseload_roles", on_delete=models.CASCADE)
group = models.ForeignKey(Group, blank=True, null=True, related_name="caseload_roles", on_delete=models.CASCADE)
def __str__(self):
return f"{self.role.name} on {self.caseload}"
class WkcmpEligibilityData(AuthorizedModel):
document = models.ForeignKey(Document, related_name="wkcmp_eligibility", on_delete=models.CASCADE)
current_monthly_income = models.DecimalField(max_digits=8, decimal_places=2)
employer = models.CharField(max_length=100)
num_dependents = models.PositiveSmallIntegerField()
| true | true |
1c4aef25deb00cfd4a6a12c8bfa8ad3d271e90a6 | 7,367 | py | Python | onmt/train_single.py | iacercalixto/WALS | 7f4b5042591d536f6b371d5fb252616d2da7abaf | [
"MIT"
] | 1 | 2019-12-19T09:47:35.000Z | 2019-12-19T09:47:35.000Z | onmt/train_single.py | iacercalixto/WALS | 7f4b5042591d536f6b371d5fb252616d2da7abaf | [
"MIT"
] | null | null | null | onmt/train_single.py | iacercalixto/WALS | 7f4b5042591d536f6b371d5fb252616d2da7abaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Training on a single process
"""
from __future__ import division
import argparse
import os
import random
import torch
import onmt.opts as opts
from onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \
_load_fields, _collect_report_features
from onmt.model_builder import build_model
from onmt.utils.optimizers import build_optim
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
import sqlite3
from collections import defaultdict
import numpy as np
def get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames) :
FeatureValues, FeatureTensors = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
for Language in SimulationLanguages: # For each language in the simulation...
idx_language = ListLanguages.index(Language)
for FeatureType in FeatureTypes: # For each feature type...
for Feature in FeatureTypes[FeatureType]: # For each feature...
idx_feature = FeatureNames.index(Feature)
FeatureValues[Language][Feature] = WalsValues[idx_language][idx_feature+1]
FeatureTensors[Feature] = torch.from_numpy(np.array(range(FeaturesList[idx_feature][1] + 1)))
return FeatureValues, FeatureTensors
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
return n_params, enc, dec
def training_opt_postprocessing(opt, device_id):
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
opt.brnn = (opt.encoder_type == "brnn")
if opt.rnn_type == "SRU" and not opt.gpu_ranks:
raise AssertionError("Using SRU requires -gpu_ranks set.")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.seed > 0:
torch.manual_seed(opt.seed)
# this one is needed for torchtext random call (shuffled iterator)
# in multi gpu it ensures datasets are read in the same order
random.seed(opt.seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
if opt.seed > 0:
# These ensure same initialization in multi gpu mode
torch.cuda.manual_seed(opt.seed)
return opt
def main(opt, device_id):
SimulationLanguages = [opt.wals_src, opt.wals_tgt]
print('Loading WALS features from databases...')
cwd = os.getcwd()
print(cwd)
db = sqlite3.connect(cwd + '/onmt/WalsValues.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM WalsValues')
WalsValues = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FeaturesList.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FeaturesList')
FeaturesList = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FTInfos.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FTInfos')
FTInfos = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FTList.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FTList')
FTList = cursor.fetchall()
ListLanguages = []
for i in WalsValues:
ListLanguages.append(i[0])
FeatureTypes = defaultdict(lambda: defaultdict(list))
for i in FTList:
FeatureTypes[i[0]] = i[1].split(',')
FeatureNames = []
for i in FeatureTypes:
for j in FeatureTypes[i]:
FeatureNames.append(j)
FeatureTypesNames = []
for i in FeatureTypes:
FeatureTypesNames.append(i)
FeatureValues, FeatureTensors = get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames)
print('WALS databases loaded!')
# FeatureValues: defaultdict with feature values, per language.
# FeatureTensors: tensor of possible outputs, per feature.
opt = training_opt_postprocessing(opt, device_id)
init_logger(opt.log_file)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
else:
checkpoint = None
model_opt = opt
# Peek the first dataset to determine the data_type.
# (All datasets have the same data_type).
first_dataset = next(lazily_load_dataset("train", opt))
data_type = first_dataset.data_type
# Load fields generated from preprocess phase.
fields = _load_fields(first_dataset, data_type, opt, checkpoint)
# Report src/tgt features.
src_features, tgt_features = _collect_report_features(fields)
for j, feat in enumerate(src_features):
logger.info(' * src feature %d size = %d'
% (j, len(fields[feat].vocab)))
for j, feat in enumerate(tgt_features):
logger.info(' * tgt feature %d size = %d'
% (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint, FeatureValues, FeatureTensors, FeatureTypes, FeaturesList, FeatureNames, FTInfos, FeatureTypesNames, SimulationLanguages)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
# Build optimizer.
optim = build_optim(model, opt, checkpoint)
# Build model saver
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(opt, device_id, model, fields,
optim, data_type, model_saver=model_saver)
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields, opt, is_train=False)
# Do training.
trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
opt.valid_steps)
if opt.tensorboard:
trainer.report_manager.tensorboard_writer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
| 33.334842 | 181 | 0.681146 | from __future__ import division
import argparse
import os
import random
import torch
import onmt.opts as opts
from onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \
_load_fields, _collect_report_features
from onmt.model_builder import build_model
from onmt.utils.optimizers import build_optim
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
import sqlite3
from collections import defaultdict
import numpy as np
def get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames) :
FeatureValues, FeatureTensors = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
    for Language in SimulationLanguages:
        idx_language = ListLanguages.index(Language)
        for FeatureType in FeatureTypes:
            for Feature in FeatureTypes[FeatureType]:
                idx_feature = FeatureNames.index(Feature)
FeatureValues[Language][Feature] = WalsValues[idx_language][idx_feature+1]
FeatureTensors[Feature] = torch.from_numpy(np.array(range(FeaturesList[idx_feature][1] + 1)))
return FeatureValues, FeatureTensors
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
return n_params, enc, dec
def training_opt_postprocessing(opt, device_id):
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
opt.brnn = (opt.encoder_type == "brnn")
if opt.rnn_type == "SRU" and not opt.gpu_ranks:
raise AssertionError("Using SRU requires -gpu_ranks set.")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.seed > 0:
torch.manual_seed(opt.seed)
random.seed(opt.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
if opt.seed > 0:
torch.cuda.manual_seed(opt.seed)
return opt
def main(opt, device_id):
SimulationLanguages = [opt.wals_src, opt.wals_tgt]
print('Loading WALS features from databases...')
cwd = os.getcwd()
print(cwd)
db = sqlite3.connect(cwd + '/onmt/WalsValues.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM WalsValues')
WalsValues = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FeaturesList.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FeaturesList')
FeaturesList = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FTInfos.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FTInfos')
FTInfos = cursor.fetchall()
db = sqlite3.connect(cwd + '/onmt/FTList.db')
cursor = db.cursor()
cursor.execute('SELECT * FROM FTList')
FTList = cursor.fetchall()
ListLanguages = []
for i in WalsValues:
ListLanguages.append(i[0])
FeatureTypes = defaultdict(lambda: defaultdict(list))
for i in FTList:
FeatureTypes[i[0]] = i[1].split(',')
FeatureNames = []
for i in FeatureTypes:
for j in FeatureTypes[i]:
FeatureNames.append(j)
FeatureTypesNames = []
for i in FeatureTypes:
FeatureTypesNames.append(i)
FeatureValues, FeatureTensors = get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames)
print('WALS databases loaded!')
opt = training_opt_postprocessing(opt, device_id)
init_logger(opt.log_file)
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
else:
checkpoint = None
model_opt = opt
first_dataset = next(lazily_load_dataset("train", opt))
data_type = first_dataset.data_type
fields = _load_fields(first_dataset, data_type, opt, checkpoint)
src_features, tgt_features = _collect_report_features(fields)
for j, feat in enumerate(src_features):
logger.info(' * src feature %d size = %d'
% (j, len(fields[feat].vocab)))
for j, feat in enumerate(tgt_features):
logger.info(' * tgt feature %d size = %d'
% (j, len(fields[feat].vocab)))
model = build_model(model_opt, opt, fields, checkpoint, FeatureValues, FeatureTensors, FeatureTypes, FeaturesList, FeatureNames, FTInfos, FeatureTypesNames, SimulationLanguages)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
optim = build_optim(model, opt, checkpoint)
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(opt, device_id, model, fields,
optim, data_type, model_saver=model_saver)
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields, opt, is_train=False)
trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
opt.valid_steps)
if opt.tensorboard:
trainer.report_manager.tensorboard_writer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
| true | true |
1c4aef758d67003c0f78c72def5acfed40ba9f4c | 271 | py | Python | upol_search_engine/upol_crawler/tools/blacklist.py | UPOLSearch/UPOL-Search-Engine | 791f0a4a01b4d034637ee6dba4cfd254ae8f3f50 | [
"MIT"
] | 1 | 2022-02-02T17:49:23.000Z | 2022-02-02T17:49:23.000Z | upol_search_engine/upol_crawler/tools/blacklist.py | UPOLSearch/UPOL-Search-Engine | 791f0a4a01b4d034637ee6dba4cfd254ae8f3f50 | [
"MIT"
] | 5 | 2017-10-09T09:23:32.000Z | 2017-11-13T08:17:08.000Z | upol_search_engine/upol_crawler/tools/blacklist.py | UPOLSearch/UPOL-Search-Engine | 791f0a4a01b4d034637ee6dba4cfd254ae8f3f50 | [
"MIT"
] | null | null | null | from upol_search_engine.utils import urls
def generate_blacklist(text):
return urls.load_urls_from_text(text)
def is_url_blocked(url, blacklist):
"""Check if url domain is blocked"""
if urls.domain(url) in blacklist:
return True
return False
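# A minimal usage sketch, assuming urls.load_urls_from_text() and urls.domain()
# behave as their names suggest:
#
#   blacklist = generate_blacklist("forbidden.example.com\n")
#   is_url_blocked("http://forbidden.example.com/page", blacklist)   # -> True when the
#   # extracted domain matches a blacklist entry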
| 19.357143 | 41 | 0.723247 | from upol_search_engine.utils import urls
def generate_blacklist(text):
return urls.load_urls_from_text(text)
def is_url_blocked(url, blacklist):
if urls.domain(url) in blacklist:
return True
return False
| true | true |
1c4af083e638bd57ce552a8c2584676de6b169bc | 2,063 | py | Python | longclaw/orders/tests.py | MstatiliS/longclaw | 874e35ece3710d1c30ebdadfec9708caa9b6553f | [
"MIT"
] | null | null | null | longclaw/orders/tests.py | MstatiliS/longclaw | 874e35ece3710d1c30ebdadfec9708caa9b6553f | [
"MIT"
] | null | null | null | longclaw/orders/tests.py | MstatiliS/longclaw | 874e35ece3710d1c30ebdadfec9708caa9b6553f | [
"MIT"
] | null | null | null | import mock
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from wagtail.tests.utils import WagtailTestUtils
from longclaw.tests.utils import LongclawTestCase, OrderFactory
from longclaw.orders.wagtail_hooks import OrderModelAdmin
class OrderTests(LongclawTestCase):
def setUp(self):
self.order = OrderFactory(transaction_id="FAKE")
admin = User.objects.create_superuser('admn', '[email protected]', 'password')
self.client.force_authenticate(user=admin)
def test_fulfill_order(self):
self.post_test({}, 'longclaw_fulfill_order', urlkwargs={'pk': self.order.id})
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.FULFILLED)
def test_total(self):
self.assertEqual(self.order.total, 0)
def test_total_items(self):
self.assertEqual(self.order.total_items, 0)
def test_refund_order(self):
self.post_test({}, 'longclaw_refund_order', urlkwargs={'pk': self.order.id})
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.REFUNDED)
def test_cancel_order(self):
self.order.cancel()
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.CANCELLED)
class TestOrderView(LongclawTestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.model_admin = OrderModelAdmin()
def test_order_index_view(self):
"""
Test the index view
"""
name = self.model_admin.url_helper.get_action_url_name('index')
response = self.client.get(reverse_lazy(name))
self.assertEqual(response.status_code, 200)
def test_order_detail_view(self):
order = OrderFactory()
name = self.model_admin.url_helper.get_action_url_name('detail')
response = self.client.get(reverse_lazy(name, kwargs={'instance_pk': order.pk}))
self.assertEqual(response.status_code, 200)
| 34.966102 | 88 | 0.709646 | import mock
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from wagtail.tests.utils import WagtailTestUtils
from longclaw.tests.utils import LongclawTestCase, OrderFactory
from longclaw.orders.wagtail_hooks import OrderModelAdmin
class OrderTests(LongclawTestCase):
def setUp(self):
self.order = OrderFactory(transaction_id="FAKE")
admin = User.objects.create_superuser('admn', '[email protected]', 'password')
self.client.force_authenticate(user=admin)
def test_fulfill_order(self):
self.post_test({}, 'longclaw_fulfill_order', urlkwargs={'pk': self.order.id})
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.FULFILLED)
def test_total(self):
self.assertEqual(self.order.total, 0)
def test_total_items(self):
self.assertEqual(self.order.total_items, 0)
def test_refund_order(self):
self.post_test({}, 'longclaw_refund_order', urlkwargs={'pk': self.order.id})
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.REFUNDED)
def test_cancel_order(self):
self.order.cancel()
self.order.refresh_from_db()
self.assertEqual(self.order.status, self.order.CANCELLED)
class TestOrderView(LongclawTestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.model_admin = OrderModelAdmin()
def test_order_index_view(self):
name = self.model_admin.url_helper.get_action_url_name('index')
response = self.client.get(reverse_lazy(name))
self.assertEqual(response.status_code, 200)
def test_order_detail_view(self):
order = OrderFactory()
name = self.model_admin.url_helper.get_action_url_name('detail')
response = self.client.get(reverse_lazy(name, kwargs={'instance_pk': order.pk}))
self.assertEqual(response.status_code, 200)
| true | true |
1c4af096765c70ceabadb35d49d28ebfd65052eb | 3,510 | py | Python | test/functional/mempool_compatibility.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | test/functional/mempool_compatibility.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | test/functional/mempool_compatibility.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The shitecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that mempool.dat is both backward and forward compatible between versions
NOTE: The test is designed to prevent cases when compatibility is broken accidentally.
In case we need to break mempool compatibility we can continue to use the test by just bumping the version number.
Download node binaries:
test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
Only v0.15.2 is required by this test. The rest is used in other backwards compatibility tests.
"""
import os
from test_framework.test_framework import shitecoinTestFramework
from test_framework.wallet import MiniWallet
class MempoolCompatibilityTest(shitecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.wallet_names = [None]
def skip_test_if_missing_module(self):
self.skip_if_no_previous_releases()
def setup_network(self):
self.add_nodes(self.num_nodes, versions=[
190100, # oldest version with getmempoolinfo.loaded (used to avoid intermittent issues)
None,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def run_test(self):
self.log.info("Test that mempool.dat is compatible between versions")
old_node, new_node = self.nodes
new_wallet = MiniWallet(new_node)
new_wallet.generate(1)
new_node.generate(100)
# Sync the nodes to ensure old_node has the block that contains the coinbase that new_wallet will spend.
# Otherwise, because coinbases are only valid in a block and not as loose txns, if the nodes aren't synced
# unbroadcasted_tx won't pass old_node's `MemPoolAccept::PreChecks`.
self.connect_nodes(0, 1)
self.sync_blocks()
recipient = old_node.getnewaddress()
self.stop_node(1)
self.log.info("Add a transaction to mempool on old node and shutdown")
old_tx_hash = old_node.sendtoaddress(recipient, 0.0001)
assert old_tx_hash in old_node.getrawmempool()
self.stop_node(0)
self.log.info("Move mempool.dat from old to new node")
old_node_mempool = os.path.join(old_node.datadir, self.chain, 'mempool.dat')
new_node_mempool = os.path.join(new_node.datadir, self.chain, 'mempool.dat')
os.rename(old_node_mempool, new_node_mempool)
self.log.info("Start new node and verify mempool contains the tx")
self.start_node(1)
assert old_tx_hash in new_node.getrawmempool()
self.log.info("Add unbroadcasted tx to mempool on new node and shutdown")
unbroadcasted_tx_hash = new_wallet.send_self_transfer(from_node=new_node)['txid']
assert unbroadcasted_tx_hash in new_node.getrawmempool()
mempool = new_node.getrawmempool(True)
assert mempool[unbroadcasted_tx_hash]['unbroadcast']
self.stop_node(1)
self.log.info("Move mempool.dat from new to old node")
os.rename(new_node_mempool, old_node_mempool)
self.log.info("Start old node again and verify mempool contains both txs")
self.start_node(0, ['-nowallet'])
assert old_tx_hash in old_node.getrawmempool()
assert unbroadcasted_tx_hash in old_node.getrawmempool()
if __name__ == "__main__":
MempoolCompatibilityTest().main()
| 41.294118 | 114 | 0.71567 |
import os
from test_framework.test_framework import shitecoinTestFramework
from test_framework.wallet import MiniWallet
class MempoolCompatibilityTest(shitecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.wallet_names = [None]
def skip_test_if_missing_module(self):
self.skip_if_no_previous_releases()
def setup_network(self):
self.add_nodes(self.num_nodes, versions=[
190100, None,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def run_test(self):
self.log.info("Test that mempool.dat is compatible between versions")
old_node, new_node = self.nodes
new_wallet = MiniWallet(new_node)
new_wallet.generate(1)
new_node.generate(100)
# unbroadcasted_tx won't pass old_node's `MemPoolAccept::PreChecks`.
self.connect_nodes(0, 1)
self.sync_blocks()
recipient = old_node.getnewaddress()
self.stop_node(1)
self.log.info("Add a transaction to mempool on old node and shutdown")
old_tx_hash = old_node.sendtoaddress(recipient, 0.0001)
assert old_tx_hash in old_node.getrawmempool()
self.stop_node(0)
self.log.info("Move mempool.dat from old to new node")
old_node_mempool = os.path.join(old_node.datadir, self.chain, 'mempool.dat')
new_node_mempool = os.path.join(new_node.datadir, self.chain, 'mempool.dat')
os.rename(old_node_mempool, new_node_mempool)
self.log.info("Start new node and verify mempool contains the tx")
self.start_node(1)
assert old_tx_hash in new_node.getrawmempool()
self.log.info("Add unbroadcasted tx to mempool on new node and shutdown")
unbroadcasted_tx_hash = new_wallet.send_self_transfer(from_node=new_node)['txid']
assert unbroadcasted_tx_hash in new_node.getrawmempool()
mempool = new_node.getrawmempool(True)
assert mempool[unbroadcasted_tx_hash]['unbroadcast']
self.stop_node(1)
self.log.info("Move mempool.dat from new to old node")
os.rename(new_node_mempool, old_node_mempool)
self.log.info("Start old node again and verify mempool contains both txs")
self.start_node(0, ['-nowallet'])
assert old_tx_hash in old_node.getrawmempool()
assert unbroadcasted_tx_hash in old_node.getrawmempool()
if __name__ == "__main__":
MempoolCompatibilityTest().main()
| true | true |
1c4af0f60e6d67d8d5a2ad58f0c9b68d92d1c96e | 3,583 | py | Python | tensorflow_toolkit/image_retrieval/image_retrieval/common.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/common.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/common.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import cv2
def max_central_square_crop(image):
''' Makes max-sized central squared crop. '''
height, width = image.shape[:2]
if width > height:
image = image[:, (width - height) // 2:(width - height) // 2 + height]
else:
image = image[(height - width) // 2:(height - width) // 2 + width, :]
return image
def preproces_image(image):
''' Scales and subtracts mean value from image. '''
image = image / 127.5 - 1.0
return image
def depreprocess_image(image):
''' Makes transform which is inverse to preprocessing. '''
image = (image + 1.0) * 127.5
image = image.astype(np.uint8)
return image
def fit_to_max_size(image, max_size):
''' Fits input image to max_size. '''
if image.shape[0] > max_size or image.shape[1] > max_size:
if image.shape[0] > image.shape[1]:
image = cv2.resize(image, (int(image.shape[1] / (image.shape[0] / max_size)), max_size))
else:
image = cv2.resize(image, (max_size, int(image.shape[0] / (image.shape[1] / max_size))))
return image
def crop_resize(image, input_size):
''' Makes max-sized central crop, resizes to input_size. '''
image = max_central_square_crop(image)
image = cv2.resize(image, (input_size, input_size))
return image
def crop_resize_shift_scale(image, input_size):
''' Makes max-sized central crop, resizes to input_size, scales and subtracts mean values. '''
image = crop_resize(image, input_size)
image = preproces_image(image)
image = np.expand_dims(image, axis=0)
return image
def central_crop(image, divide_by, shift):
''' Makes central crops dividing input image by number of equal cells. '''
height, width = image.shape[0:2]
image = image[height // divide_by * shift: height // divide_by * (divide_by - shift),
width // divide_by * shift: width // divide_by * (divide_by - shift)]
return image
def from_list(path, multiple_images_per_label=True):
''' Loads images list. '''
images_path = []
labels = []
is_real = []
text_label_to_class_id = {}
uniques_labels = set()
root = os.path.dirname(os.path.abspath(path))
with open(path) as opened_file:
for line in opened_file.readlines():
line = line.strip().split(' ')
if len(line) == 2:
image_path, label = line
real = False
else:
image_path, label, real = line
real = real.lower() == 'r'
text_label_to_class_id[os.path.basename(image_path).split('.')[0]] = int(label)
if not multiple_images_per_label and label in uniques_labels:
continue
uniques_labels.add(label)
is_real.append(real)
images_path.append(os.path.join(root, image_path))
labels.append(int(label))
return images_path, labels, is_real, text_label_to_class_id
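# --- Illustrative usage sketch (not part of the original file) ---
# Ties the helpers above together: read an image with OpenCV, then crop, resize and
# normalize it for the retrieval network. The file name and the 224-pixel input size
# are assumptions for the sketch, not values taken from this repository.
if __name__ == '__main__':
    demo = cv2.imread('example.jpg')
    if demo is not None:
        batch = crop_resize_shift_scale(demo, 224)
        # batch now has shape (1, 224, 224, 3) with values scaled to [-1, 1]
        print('preprocessed batch shape:', batch.shape)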
| 28.895161 | 100 | 0.642757 |
import os
import numpy as np
import cv2
def max_central_square_crop(image):
height, width = image.shape[:2]
if width > height:
image = image[:, (width - height) // 2:(width - height) // 2 + height]
else:
image = image[(height - width) // 2:(height - width) // 2 + width, :]
return image
def preproces_image(image):
image = image / 127.5 - 1.0
return image
def depreprocess_image(image):
image = (image + 1.0) * 127.5
image = image.astype(np.uint8)
return image
def fit_to_max_size(image, max_size):
if image.shape[0] > max_size or image.shape[1] > max_size:
if image.shape[0] > image.shape[1]:
image = cv2.resize(image, (int(image.shape[1] / (image.shape[0] / max_size)), max_size))
else:
image = cv2.resize(image, (max_size, int(image.shape[0] / (image.shape[1] / max_size))))
return image
def crop_resize(image, input_size):
image = max_central_square_crop(image)
image = cv2.resize(image, (input_size, input_size))
return image
def crop_resize_shift_scale(image, input_size):
image = crop_resize(image, input_size)
image = preproces_image(image)
image = np.expand_dims(image, axis=0)
return image
def central_crop(image, divide_by, shift):
height, width = image.shape[0:2]
image = image[height // divide_by * shift: height // divide_by * (divide_by - shift),
width // divide_by * shift: width // divide_by * (divide_by - shift)]
return image
def from_list(path, multiple_images_per_label=True):
images_path = []
labels = []
is_real = []
text_label_to_class_id = {}
uniques_labels = set()
root = os.path.dirname(os.path.abspath(path))
with open(path) as opened_file:
for line in opened_file.readlines():
line = line.strip().split(' ')
if len(line) == 2:
image_path, label = line
real = False
else:
image_path, label, real = line
real = real.lower() == 'r'
text_label_to_class_id[os.path.basename(image_path).split('.')[0]] = int(label)
if not multiple_images_per_label and label in uniques_labels:
continue
uniques_labels.add(label)
is_real.append(real)
images_path.append(os.path.join(root, image_path))
labels.append(int(label))
return images_path, labels, is_real, text_label_to_class_id
| true | true |
1c4af275adaa05ba77c5b648c0a78fd0044d8040 | 9,891 | py | Python | src/webapp/azext_webapp/custom.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | null | null | null | src/webapp/azext_webapp/custom.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | null | null | null | src/webapp/azext_webapp/custom.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import json
from knack.log import get_logger
from azure.mgmt.web.models import (AppServicePlan, SkuDescription)
from azure.cli.command_modules.appservice.custom import (
enable_zip_deploy,
create_webapp,
update_app_settings,
_get_site_credential,
_get_scm_url,
get_sku_name,
list_publish_profiles,
get_site_configs)
from azure.cli.command_modules.appservice._appservice_utils import _generic_site_operation
from .create_util import (
zip_contents_from_dir,
get_runtime_version_details,
create_resource_group,
check_resource_group_exists,
check_resource_group_supports_os,
check_if_asp_exists,
check_app_exists,
get_lang_from_content,
web_client_factory
)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals,too-many-statements,too-many-branches
def create_deploy_webapp(cmd, name, location=None, dryrun=False):
import os
client = web_client_factory(cmd.cli_ctx)
# the code to deploy is expected to be the current directory the command is running from
src_dir = os.getcwd()
# if dir is empty, show a message in dry run
do_deployment = False if os.listdir(src_dir) == [] else True
# determine the details for app to be created from src contents
lang_details = get_lang_from_content(src_dir)
# we support E2E create and deploy for Node & dotnetcore, any other stack, set defaults for os & runtime
# and skip deployment
if lang_details['language'] is None:
do_deployment = False
sku = 'F1'
os_val = OS_DEFAULT
detected_version = '-'
runtime_version = '-'
else:
sku = lang_details.get("default_sku")
language = lang_details.get("language")
is_skip_build = language.lower() == STATIC_RUNTIME_NAME or language.lower() == PYTHON_RUNTIME_NAME
os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
if location is None:
locs = client.list_geo_regions(sku, True)
available_locs = []
for loc in locs:
available_locs.append(loc.name)
location = available_locs[0]
    # Remove spaces from the location string, in case the GeoRegion string is used
loc_name = location.replace(" ", "")
full_sku = get_sku_name(sku)
is_linux = True if os_val == 'Linux' else False
asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
str_no_contents_warn = ""
if not do_deployment:
str_no_contents_warn = "[Empty directory, no deployment will be triggered]"
# Resource group: check if default RG is set
default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
if default_rg and check_resource_group_exists(cmd, default_rg) and \
check_resource_group_supports_os(cmd, default_rg, location, is_linux):
rg_name = default_rg
rg_mssg = "[Using default Resource group]"
else:
rg_mssg = ""
src_path = "{} {}".format(src_dir.replace("\\", "\\\\"), str_no_contents_warn)
rg_str = "{} {}".format(rg_name, rg_mssg)
dry_run_str = r""" {
"name" : "%s",
"serverfarm" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"version_detected": "%s",
"version_to_create": "%s"
}
""" % (name, asp, rg_str, full_sku, os_val, location, src_path,
detected_version, runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
# create RG if the RG doesn't already exist
if not check_resource_group_exists(cmd, rg_name):
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
else:
logger.warning("Resource group '%s' already exists.", rg_name)
# create asp
if not check_if_asp_exists(cmd, rg_name, asp):
logger.warning("Creating App service plan '%s' ...", asp)
sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
sku=sku_def, reserved=(is_linux or None))
client.app_service_plans.create_or_update(rg_name, asp, plan_def)
logger.warning("App service plan creation complete")
else:
logger.warning("App service plan '%s' already exists.", asp)
# create the app
if not check_app_exists(cmd, rg_name, name):
logger.warning("Creating app '%s' ....", name)
create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
logger.warning("Webapp creation complete")
else:
logger.warning("App '%s' already exists", name)
# update create_json to include the app_url
    url = _get_app_url(cmd, rg_name, name)  # picks the custom domain URL in case a domain is assigned
if do_deployment:
if not is_skip_build:
# setting to build after deployment
logger.warning("Updating app settings to enable build after deployment")
update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
# work around until the timeout limits issue for linux is investigated & fixed
# wakeup kudu, by making an SCM call
_ping_scm_site(cmd, rg_name, name)
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
logger.warning("Preparing to deploy %s contents to app.",
'' if is_skip_build else 'and build')
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file after deployment, handling the exception if the user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
else:
logger.warning('No known package (Node, ASP.NET, .NETCORE, or Static Html) '
                       'found, skipping zip and deploy process')
create_json.update({'app_url': url})
logger.warning("All done.")
return create_json
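# Illustrative example (not part of the original module): with --dryrun the command returns
# only the planned configuration built from dry_run_str above; the keys match that template
# and every value below is made up:
#   {
#       "name": "mywebapp",
#       "serverfarm": "appsvc_asp_Linux_centralus",
#       "resourcegroup": "appsvc_rg_Linux_centralus [Using default Resource group]",
#       "sku": "Free",
#       "os": "Linux",
#       "location": "centralus",
#       "src_path": "/home/me/src",
#       "version_detected": "10.14",
#       "version_to_create": "node|10.14"
#   }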
def _ping_scm_site(cmd, resource_group, name):
# wakeup kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization)
def _get_app_url(cmd, rg_name, app_name):
site = _generic_site_operation(cmd.cli_ctx, rg_name, app_name, 'get')
return "https://" + site.enabled_host_names[0]
def _check_for_ready_tunnel(remote_debugging, tunnel_server):
default_port = tunnel_server.is_port_set_to_default()
if default_port is not remote_debugging:
return True
return False
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None):
import time
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
user_name = next(p['userName'] for p in profiles)
user_password = next(p['userPWD'] for p in profiles)
import threading
from .tunnel import TunnelServer
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
host_name = name
if slot is not None:
host_name += "-" + slot
tunnel_server = TunnelServer('', port, host_name, user_name, user_password)
config = get_site_configs(cmd, resource_group_name, name, slot)
_ping_scm_site(cmd, resource_group_name, name)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server, config.remote_debugging_enabled))
t.daemon = True
t.start()
# Wait indefinitely for CTRL-C
while True:
time.sleep(5)
def _start_tunnel(tunnel_server, remote_debugging_enabled):
import time
if not _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
logger.warning('Tunnel is not ready yet, please wait (may take up to 1 minute)')
while True:
time.sleep(1)
logger.warning('.')
if _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
break
if remote_debugging_enabled is False:
logger.warning('SSH is available { username: root, password: Docker! }')
tunnel_server.start_server()
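# Illustrative note (not part of the original module): while start_server() runs, local
# connections to the chosen port are forwarded to the app using the publish-profile
# credentials gathered in create_tunnel; with remote debugging disabled the warning above
# applies, so an example session would be `ssh root@127.0.0.1 -p <local_port>` with the
# password that message names.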
| 41.2125 | 108 | 0.663937 |
from __future__ import print_function
import json
from knack.log import get_logger
from azure.mgmt.web.models import (AppServicePlan, SkuDescription)
from azure.cli.command_modules.appservice.custom import (
enable_zip_deploy,
create_webapp,
update_app_settings,
_get_site_credential,
_get_scm_url,
get_sku_name,
list_publish_profiles,
get_site_configs)
from azure.cli.command_modules.appservice._appservice_utils import _generic_site_operation
from .create_util import (
zip_contents_from_dir,
get_runtime_version_details,
create_resource_group,
check_resource_group_exists,
check_resource_group_supports_os,
check_if_asp_exists,
check_app_exists,
get_lang_from_content,
web_client_factory
)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME)
logger = get_logger(__name__)
def create_deploy_webapp(cmd, name, location=None, dryrun=False):
import os
client = web_client_factory(cmd.cli_ctx)
src_dir = os.getcwd()
do_deployment = False if os.listdir(src_dir) == [] else True
lang_details = get_lang_from_content(src_dir)
if lang_details['language'] is None:
do_deployment = False
sku = 'F1'
os_val = OS_DEFAULT
detected_version = '-'
runtime_version = '-'
else:
sku = lang_details.get("default_sku")
language = lang_details.get("language")
is_skip_build = language.lower() == STATIC_RUNTIME_NAME or language.lower() == PYTHON_RUNTIME_NAME
os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
if location is None:
locs = client.list_geo_regions(sku, True)
available_locs = []
for loc in locs:
available_locs.append(loc.name)
location = available_locs[0]
loc_name = location.replace(" ", "")
full_sku = get_sku_name(sku)
is_linux = True if os_val == 'Linux' else False
asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
str_no_contents_warn = ""
if not do_deployment:
str_no_contents_warn = "[Empty directory, no deployment will be triggered]"
default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
if default_rg and check_resource_group_exists(cmd, default_rg) and \
check_resource_group_supports_os(cmd, default_rg, location, is_linux):
rg_name = default_rg
rg_mssg = "[Using default Resource group]"
else:
rg_mssg = ""
src_path = "{} {}".format(src_dir.replace("\\", "\\\\"), str_no_contents_warn)
rg_str = "{} {}".format(rg_name, rg_mssg)
dry_run_str = r""" {
"name" : "%s",
"serverfarm" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"version_detected": "%s",
"version_to_create": "%s"
}
""" % (name, asp, rg_str, full_sku, os_val, location, src_path,
detected_version, runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if not check_resource_group_exists(cmd, rg_name):
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
else:
logger.warning("Resource group '%s' already exists.", rg_name)
# create asp
if not check_if_asp_exists(cmd, rg_name, asp):
logger.warning("Creating App service plan '%s' ...", asp)
sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
sku=sku_def, reserved=(is_linux or None))
client.app_service_plans.create_or_update(rg_name, asp, plan_def)
logger.warning("App service plan creation complete")
else:
logger.warning("App service plan '%s' already exists.", asp)
# create the app
if not check_app_exists(cmd, rg_name, name):
logger.warning("Creating app '%s' ....", name)
create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
logger.warning("Webapp creation complete")
else:
logger.warning("App '%s' already exists", name)
# update create_json to include the app_url
    url = _get_app_url(cmd, rg_name, name)  # picks the custom domain URL in case a domain is assigned
if do_deployment:
if not is_skip_build:
# setting to build after deployment
logger.warning("Updating app settings to enable build after deployment")
update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
# work around until the timeout limits issue for linux is investigated & fixed
# wakeup kudu, by making an SCM call
_ping_scm_site(cmd, rg_name, name)
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
logger.warning("Preparing to deploy %s contents to app.",
'' if is_skip_build else 'and build')
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file after deployment, handling the exception if the user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
else:
logger.warning('No known package (Node, ASP.NET, .NETCORE, or Static Html) '
                       'found, skipping zip and deploy process')
create_json.update({'app_url': url})
logger.warning("All done.")
return create_json
def _ping_scm_site(cmd, resource_group, name):
# wakeup kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization)
def _get_app_url(cmd, rg_name, app_name):
site = _generic_site_operation(cmd.cli_ctx, rg_name, app_name, 'get')
return "https://" + site.enabled_host_names[0]
def _check_for_ready_tunnel(remote_debugging, tunnel_server):
default_port = tunnel_server.is_port_set_to_default()
if default_port is not remote_debugging:
return True
return False
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None):
import time
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
user_name = next(p['userName'] for p in profiles)
user_password = next(p['userPWD'] for p in profiles)
import threading
from .tunnel import TunnelServer
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
host_name = name
if slot is not None:
host_name += "-" + slot
tunnel_server = TunnelServer('', port, host_name, user_name, user_password)
config = get_site_configs(cmd, resource_group_name, name, slot)
_ping_scm_site(cmd, resource_group_name, name)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server, config.remote_debugging_enabled))
t.daemon = True
t.start()
# Wait indefinitely for CTRL-C
while True:
time.sleep(5)
def _start_tunnel(tunnel_server, remote_debugging_enabled):
import time
if not _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
logger.warning('Tunnel is not ready yet, please wait (may take up to 1 minute)')
while True:
time.sleep(1)
logger.warning('.')
if _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
break
if remote_debugging_enabled is False:
logger.warning('SSH is available { username: root, password: Docker! }')
tunnel_server.start_server()
| true | true |
1c4af3134b3e34c42733d1345933b4cd1aa8b0eb | 342 | py | Python | pylib/os_path_test.py | bvberkum/oil | 8b93aeda9e8da8af790d747fcb11d00a673eb58c | [
"Apache-2.0"
] | null | null | null | pylib/os_path_test.py | bvberkum/oil | 8b93aeda9e8da8af790d747fcb11d00a673eb58c | [
"Apache-2.0"
] | null | null | null | pylib/os_path_test.py | bvberkum/oil | 8b93aeda9e8da8af790d747fcb11d00a673eb58c | [
"Apache-2.0"
] | 1 | 2021-03-06T22:08:23.000Z | 2021-03-06T22:08:23.000Z | #!/usr/bin/env python2
"""
os_path_test.py: Tests for os_path.py
"""
from __future__ import print_function
import unittest
from pylib import os_path # module under test
class OsPathTest(unittest.TestCase):
def testBasename(self):
self.assertEqual('bar', os_path.basename('foo/bar'))
if __name__ == '__main__':
unittest.main()
| 17.1 | 56 | 0.733918 | from __future__ import print_function
import unittest
from pylib import os_path
class OsPathTest(unittest.TestCase):
def testBasename(self):
self.assertEqual('bar', os_path.basename('foo/bar'))
if __name__ == '__main__':
unittest.main()
| true | true |
1c4af34de1b3fe4ed1ff6c877d7aaa3d3b36f519 | 295 | py | Python | llvm_headers/conanfile.py | Manu343726/clang-conan-packages | 4739985e95d4b0cc3f682ad0c7f5229697584136 | [
"MIT"
] | 5 | 2020-01-16T09:42:22.000Z | 2021-06-24T03:33:29.000Z | recipes/llvm_headers/conanfile.py | bincrafters/conan-llvm | caa0f2da0086978b88631df6a545a13819588407 | [
"MIT"
] | 3 | 2020-01-16T00:04:32.000Z | 2020-01-16T01:38:59.000Z | llvm_headers/conanfile.py | Manu343726/clang-conan-packages | 4739985e95d4b0cc3f682ad0c7f5229697584136 | [
"MIT"
] | 2 | 2020-04-06T23:08:59.000Z | 2020-05-28T06:25:33.000Z | from conans import python_requires
common = python_requires('llvm-common/0.0.0@Manu343726/testing')
class LLVMHeaders(common.LLVMModulePackage):
version = common.LLVMModulePackage.version
name = 'llvm_headers'
llvm_component = 'llvm'
header_only = True
include_dirs = ['']
| 26.818182 | 64 | 0.742373 | from conans import python_requires
common = python_requires('llvm-common/0.0.0@Manu343726/testing')
class LLVMHeaders(common.LLVMModulePackage):
version = common.LLVMModulePackage.version
name = 'llvm_headers'
llvm_component = 'llvm'
header_only = True
include_dirs = ['']
| true | true |
1c4af46bbed39d9dc35000b7d9f4aea79877771f | 4,760 | py | Python | logger.py | HaoranZ99/RL-2 | 253c2fd8c705f88d9cc79abd9f331dc99b5895eb | [
"MIT"
] | null | null | null | logger.py | HaoranZ99/RL-2 | 253c2fd8c705f88d9cc79abd9f331dc99b5895eb | [
"MIT"
] | null | null | null | logger.py | HaoranZ99/RL-2 | 253c2fd8c705f88d9cc79abd9f331dc99b5895eb | [
"MIT"
] | null | null | null | import numpy as np
import time, datetime
import matplotlib.pyplot as plt
class Logger():
def __init__(self, save_dir):
self.save_log = save_dir / "log"
with open(self.save_log, "w") as f:
f.write(
f"{'Episode':>8}{'Step':>8}{'Epsilon':>10}{'MeanReward':>15}"
f"{'MeanLength':>15}{'MeanLoss':>15}{'MeanQValue':>15}"
f"{'TimeDelta':>15}{'Time':>20}\n"
)
self.ep_rewards_plot = save_dir / "reward_plot.jpg"
self.ep_lengths_plot = save_dir / "length_plot.jpg"
self.ep_avg_losses_plot = save_dir / "loss_plot.jpg"
self.ep_avg_qs_plot = save_dir / "q_plot.jpg"
# History metrics
self.ep_rewards = []
self.ep_lengths = []
self.ep_avg_losses = []
self.ep_avg_qs = []
# Moving averages, added for every call to record()
self.moving_avg_ep_rewards = []
self.moving_avg_ep_lengths = []
self.moving_avg_ep_avg_losses = []
self.moving_avg_ep_avg_qs = []
# Current episode metric
self.init_episode()
# Timing
self.record_time = time.time()
def log_step(self, reward, loss, q):
self.curr_ep_reward += reward
self.curr_ep_length += 1
if loss:
self.curr_ep_loss += loss
self.curr_ep_q += q
self.curr_ep_loss_length += 1
def log_episode(self):
"Mark end of episode"
self.ep_rewards.append(self.curr_ep_reward)
self.ep_lengths.append(self.curr_ep_length)
if self.curr_ep_loss_length == 0:
ep_avg_loss = 0
ep_avg_q = 0
else:
ep_avg_loss = np.round(self.curr_ep_loss / self.curr_ep_loss_length, 5)
ep_avg_q = np.round(self.curr_ep_q / self.curr_ep_loss_length, 5)
self.ep_avg_losses.append(ep_avg_loss)
self.ep_avg_qs.append(ep_avg_q)
self.init_episode()
def init_episode(self):
self.curr_ep_reward = 0.0
self.curr_ep_length = 0
self.curr_ep_loss = 0.0
self.curr_ep_q = 0.0
self.curr_ep_loss_length = 0
def record(self, episode, epsilon, step):
mean_ep_reward = np.round(np.mean(self.ep_rewards[-100:]), 3)
mean_ep_length = np.round(np.mean(self.ep_lengths[-100:]), 3)
mean_ep_loss = np.round(np.mean(self.ep_avg_losses[-100:]), 3)
mean_ep_q = np.round(np.mean(self.ep_avg_qs[-100:]), 3)
self.moving_avg_ep_rewards.append(mean_ep_reward)
self.moving_avg_ep_lengths.append(mean_ep_length)
self.moving_avg_ep_avg_losses.append(mean_ep_loss)
self.moving_avg_ep_avg_qs.append(mean_ep_q)
last_record_time = self.record_time
self.record_time = time.time()
time_since_last_record = np.round(self.record_time - last_record_time, 3)
print(
f"Episode {episode} - "
f"Step {step} - "
f"Epsilon {epsilon} - "
f"Mean Reward {mean_ep_reward} - "
f"Mean Length {mean_ep_length} - "
f"Mean Loss {mean_ep_loss} - "
f"Mean Q Value {mean_ep_q} - "
f"Time Delta {time_since_last_record} - "
f"Time {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}"
)
with open(self.save_log, "a") as f:
f.write(
f"{episode:8d}{step:8d}{epsilon:10.3f}"
f"{mean_ep_reward:15.3f}{mean_ep_length:15.3f}{mean_ep_loss:15.3f}{mean_ep_q:15.3f}"
f"{time_since_last_record:15.3f}"
f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n"
)
for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]:
plt.plot(getattr(self, f"moving_avg_{metric}"))
plt.savefig(getattr(self, f"{metric}_plot"))
plt.clf()
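    # Illustrative usage (not part of the original class). A typical training loop, with
    # `env` and `agent` as assumed external objects:
    #   logger = Logger(save_dir)
    #   for e in range(episodes):
    #       ...play one episode, calling logger.log_step(reward, loss, q) every step...
    #       logger.log_episode()
    #       if e % 20 == 0:
    #           logger.record(episode=e, epsilon=agent.exploration_rate, step=agent.curr_step)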
def replay_log_step(self, _save_dir, _save_log, state, action):
with open(_save_dir / _save_log, "a") as f:
f.write(
f"The agent take action {action}, the state is {state}.\n"
)
def replay_log(self, _save_dir, _save_log, msg):
with open(_save_dir / _save_log, "a") as f:
f.write(
f"{msg}\n"
)
def repaly_brief(self, _save_dir, _save_log, dict, step):
with open(_save_dir / _save_log, "a") as f:
for key, val in dict.items():
f.write(f"{key} : {val / step * 100:10.3f}%.\n")
def get_action_meanings(self):
return {0: "Eat", 1: "Send gift", 2: "Idle", 3: "Chat", 4: "Work", 5: "Comments on Moments",
6:"Like on Moments", 7: "Live room", 8: "Play games", 9: "Disco dancing", 10: "Pray"} | 37.480315 | 101 | 0.576471 | import numpy as np
import time, datetime
import matplotlib.pyplot as plt
class Logger():
def __init__(self, save_dir):
self.save_log = save_dir / "log"
with open(self.save_log, "w") as f:
f.write(
f"{'Episode':>8}{'Step':>8}{'Epsilon':>10}{'MeanReward':>15}"
f"{'MeanLength':>15}{'MeanLoss':>15}{'MeanQValue':>15}"
f"{'TimeDelta':>15}{'Time':>20}\n"
)
self.ep_rewards_plot = save_dir / "reward_plot.jpg"
self.ep_lengths_plot = save_dir / "length_plot.jpg"
self.ep_avg_losses_plot = save_dir / "loss_plot.jpg"
self.ep_avg_qs_plot = save_dir / "q_plot.jpg"
self.ep_rewards = []
self.ep_lengths = []
self.ep_avg_losses = []
self.ep_avg_qs = []
self.moving_avg_ep_rewards = []
self.moving_avg_ep_lengths = []
self.moving_avg_ep_avg_losses = []
self.moving_avg_ep_avg_qs = []
self.init_episode()
self.record_time = time.time()
def log_step(self, reward, loss, q):
self.curr_ep_reward += reward
self.curr_ep_length += 1
if loss:
self.curr_ep_loss += loss
self.curr_ep_q += q
self.curr_ep_loss_length += 1
def log_episode(self):
self.ep_rewards.append(self.curr_ep_reward)
self.ep_lengths.append(self.curr_ep_length)
if self.curr_ep_loss_length == 0:
ep_avg_loss = 0
ep_avg_q = 0
else:
ep_avg_loss = np.round(self.curr_ep_loss / self.curr_ep_loss_length, 5)
ep_avg_q = np.round(self.curr_ep_q / self.curr_ep_loss_length, 5)
self.ep_avg_losses.append(ep_avg_loss)
self.ep_avg_qs.append(ep_avg_q)
self.init_episode()
def init_episode(self):
self.curr_ep_reward = 0.0
self.curr_ep_length = 0
self.curr_ep_loss = 0.0
self.curr_ep_q = 0.0
self.curr_ep_loss_length = 0
def record(self, episode, epsilon, step):
mean_ep_reward = np.round(np.mean(self.ep_rewards[-100:]), 3)
mean_ep_length = np.round(np.mean(self.ep_lengths[-100:]), 3)
mean_ep_loss = np.round(np.mean(self.ep_avg_losses[-100:]), 3)
mean_ep_q = np.round(np.mean(self.ep_avg_qs[-100:]), 3)
self.moving_avg_ep_rewards.append(mean_ep_reward)
self.moving_avg_ep_lengths.append(mean_ep_length)
self.moving_avg_ep_avg_losses.append(mean_ep_loss)
self.moving_avg_ep_avg_qs.append(mean_ep_q)
last_record_time = self.record_time
self.record_time = time.time()
time_since_last_record = np.round(self.record_time - last_record_time, 3)
print(
f"Episode {episode} - "
f"Step {step} - "
f"Epsilon {epsilon} - "
f"Mean Reward {mean_ep_reward} - "
f"Mean Length {mean_ep_length} - "
f"Mean Loss {mean_ep_loss} - "
f"Mean Q Value {mean_ep_q} - "
f"Time Delta {time_since_last_record} - "
f"Time {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}"
)
with open(self.save_log, "a") as f:
f.write(
f"{episode:8d}{step:8d}{epsilon:10.3f}"
f"{mean_ep_reward:15.3f}{mean_ep_length:15.3f}{mean_ep_loss:15.3f}{mean_ep_q:15.3f}"
f"{time_since_last_record:15.3f}"
f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n"
)
for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]:
plt.plot(getattr(self, f"moving_avg_{metric}"))
plt.savefig(getattr(self, f"{metric}_plot"))
plt.clf()
def replay_log_step(self, _save_dir, _save_log, state, action):
with open(_save_dir / _save_log, "a") as f:
f.write(
f"The agent take action {action}, the state is {state}.\n"
)
def replay_log(self, _save_dir, _save_log, msg):
with open(_save_dir / _save_log, "a") as f:
f.write(
f"{msg}\n"
)
def repaly_brief(self, _save_dir, _save_log, dict, step):
with open(_save_dir / _save_log, "a") as f:
for key, val in dict.items():
f.write(f"{key} : {val / step * 100:10.3f}%.\n")
def get_action_meanings(self):
return {0: "Eat", 1: "Send gift", 2: "Idle", 3: "Chat", 4: "Work", 5: "Comments on Moments",
6:"Like on Moments", 7: "Live room", 8: "Play games", 9: "Disco dancing", 10: "Pray"} | true | true |
1c4af486fea5fc7d523969abc73ef3add41db54f | 7,727 | py | Python | src/aks-preview/azext_aks_preview/commands.py | xiazhan/azure-cli-extensions | fa33dba098c1c4aa7624a9d8393722a9dabd050a | [
"MIT"
] | null | null | null | src/aks-preview/azext_aks_preview/commands.py | xiazhan/azure-cli-extensions | fa33dba098c1c4aa7624a9d8393722a9dabd050a | [
"MIT"
] | 1 | 2021-07-12T22:10:21.000Z | 2021-07-12T22:10:21.000Z | src/aks-preview/azext_aks_preview/commands.py | xiazhan/azure-cli-extensions | fa33dba098c1c4aa7624a9d8393722a9dabd050a | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_managed_clusters
from ._client_factory import cf_maintenance_configurations
from ._client_factory import cf_container_services
from ._client_factory import cf_agent_pools
from ._format import aks_show_table_format
from ._format import aks_agentpool_show_table_format
from ._format import aks_agentpool_list_table_format
from ._format import aks_versions_table_format
from ._format import aks_upgrades_table_format
from ._format import aks_pod_identities_table_format
from ._format import aks_pod_identity_exceptions_table_format
from ._format import aks_run_command_result_format
def load_command_table(self, _):
managed_clusters_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._managed_clusters_operations#ManagedClustersOperations.{}',
operation_group='managed_clusters',
client_factory=cf_managed_clusters
)
container_services_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations.container_service_operations#ContainerServicesOperations.{}',
operation_group='container_services',
client_factory=cf_container_services
)
agent_pools_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._agent_pools_operations#AgentPoolsOperations.{}',
client_factory=cf_managed_clusters
)
maintenance_configuration_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._maintenance_configurations_operations#MaintenanceConfigurationsOperations.{}',
client_factory=cf_maintenance_configurations
)
# AKS managed cluster commands
with self.command_group('aks', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('kollect', 'aks_kollect')
g.custom_command('kanalyze', 'aks_kanalyze')
g.custom_command('browse', 'aks_browse')
g.custom_command('create', 'aks_create', supports_no_wait=True)
g.custom_command('update', 'aks_update', supports_no_wait=True)
g.custom_command('scale', 'aks_scale', supports_no_wait=True)
g.custom_command('disable-addons', 'aks_disable_addons',
supports_no_wait=True)
g.custom_command('enable-addons', 'aks_enable_addons',
supports_no_wait=True)
g.custom_command('get-credentials', 'aks_get_credentials')
g.custom_show_command('show', 'aks_show',
table_transformer=aks_show_table_format)
g.custom_command('upgrade', 'aks_upgrade', supports_no_wait=True)
g.command('get-upgrades', 'get_upgrade_profile',
table_transformer=aks_upgrades_table_format)
g.custom_command('rotate-certs', 'aks_rotate_certs', supports_no_wait=True,
confirmation='Kubernetes will be unavailable during certificate rotation process.\n' +
'Are you sure you want to perform this operation?')
g.wait_command('wait')
g.command('stop', 'begin_stop', supports_no_wait=True)
g.command('start', 'begin_start', supports_no_wait=True)
g.custom_command('get-os-options', 'aks_get_os_options')
# AKS container service commands
with self.command_group('aks', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('get-versions', 'aks_get_versions',
table_transformer=aks_versions_table_format)
with self.command_group('aks command', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('invoke', 'aks_runcommand', supports_no_wait=True,
table_transformer=aks_run_command_result_format)
g.custom_command('result', 'aks_command_result',
supports_no_wait=False, table_transformer=aks_run_command_result_format)
# AKS maintenance configuration commands
with self.command_group('aks maintenanceconfiguration', maintenance_configuration_sdk, client_factory=cf_maintenance_configurations) as g:
g.custom_command('list', 'aks_maintenanceconfiguration_list')
g.custom_show_command('show', 'aks_maintenanceconfiguration_show')
g.custom_command('add', 'aks_maintenanceconfiguration_add')
g.custom_command('update', 'aks_maintenanceconfiguration_update')
g.custom_command('delete', 'aks_maintenanceconfiguration_delete')
# AKS agent pool commands
with self.command_group('aks nodepool', agent_pools_sdk, client_factory=cf_agent_pools) as g:
g.custom_command('list', 'aks_agentpool_list',
table_transformer=aks_agentpool_list_table_format)
g.custom_show_command('show', 'aks_agentpool_show',
table_transformer=aks_agentpool_show_table_format)
g.custom_command('add', 'aks_agentpool_add', supports_no_wait=True)
g.custom_command('scale', 'aks_agentpool_scale', supports_no_wait=True)
g.custom_command('upgrade', 'aks_agentpool_upgrade',
supports_no_wait=True)
g.custom_command('update', 'aks_agentpool_update',
supports_no_wait=True)
g.custom_command('delete', 'aks_agentpool_delete',
supports_no_wait=True)
g.custom_command('get-upgrades', 'aks_agentpool_get_upgrade_profile')
# AKS pod identity commands
with self.command_group('aks pod-identity', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('add', 'aks_pod_identity_add')
g.custom_command('delete', 'aks_pod_identity_delete')
g.custom_command('list', 'aks_pod_identity_list',
table_transformer=aks_pod_identities_table_format)
# AKS pod identity exception commands
with self.command_group('aks pod-identity exception', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('add', 'aks_pod_identity_exception_add')
g.custom_command('delete', 'aks_pod_identity_exception_delete')
g.custom_command('update', 'aks_pod_identity_exception_update')
g.custom_command('list', 'aks_pod_identity_exception_list',
table_transformer=aks_pod_identity_exceptions_table_format)
# AKS egress commands
with self.command_group('aks egress-endpoints', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('list', 'aks_egress_endpoints_list')
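    # Illustrative mapping (not part of the original module): each registration above
    # surfaces as an `az` subcommand, for example `az aks maintenanceconfiguration list`,
    # `az aks nodepool add` or `az aks pod-identity exception list`; the second argument
    # names the handler function, conventionally defined in this extension's custom.py.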
| 56.40146 | 142 | 0.711143 |
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_managed_clusters
from ._client_factory import cf_maintenance_configurations
from ._client_factory import cf_container_services
from ._client_factory import cf_agent_pools
from ._format import aks_show_table_format
from ._format import aks_agentpool_show_table_format
from ._format import aks_agentpool_list_table_format
from ._format import aks_versions_table_format
from ._format import aks_upgrades_table_format
from ._format import aks_pod_identities_table_format
from ._format import aks_pod_identity_exceptions_table_format
from ._format import aks_run_command_result_format
def load_command_table(self, _):
managed_clusters_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._managed_clusters_operations#ManagedClustersOperations.{}',
operation_group='managed_clusters',
client_factory=cf_managed_clusters
)
container_services_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations.container_service_operations#ContainerServicesOperations.{}',
operation_group='container_services',
client_factory=cf_container_services
)
agent_pools_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._agent_pools_operations#AgentPoolsOperations.{}',
client_factory=cf_managed_clusters
)
maintenance_configuration_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._maintenance_configurations_operations#MaintenanceConfigurationsOperations.{}',
client_factory=cf_maintenance_configurations
)
with self.command_group('aks', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('kollect', 'aks_kollect')
g.custom_command('kanalyze', 'aks_kanalyze')
g.custom_command('browse', 'aks_browse')
g.custom_command('create', 'aks_create', supports_no_wait=True)
g.custom_command('update', 'aks_update', supports_no_wait=True)
g.custom_command('scale', 'aks_scale', supports_no_wait=True)
g.custom_command('disable-addons', 'aks_disable_addons',
supports_no_wait=True)
g.custom_command('enable-addons', 'aks_enable_addons',
supports_no_wait=True)
g.custom_command('get-credentials', 'aks_get_credentials')
g.custom_show_command('show', 'aks_show',
table_transformer=aks_show_table_format)
g.custom_command('upgrade', 'aks_upgrade', supports_no_wait=True)
g.command('get-upgrades', 'get_upgrade_profile',
table_transformer=aks_upgrades_table_format)
g.custom_command('rotate-certs', 'aks_rotate_certs', supports_no_wait=True,
confirmation='Kubernetes will be unavailable during certificate rotation process.\n' +
'Are you sure you want to perform this operation?')
g.wait_command('wait')
g.command('stop', 'begin_stop', supports_no_wait=True)
g.command('start', 'begin_start', supports_no_wait=True)
g.custom_command('get-os-options', 'aks_get_os_options')
with self.command_group('aks', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('get-versions', 'aks_get_versions',
table_transformer=aks_versions_table_format)
with self.command_group('aks command', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('invoke', 'aks_runcommand', supports_no_wait=True,
table_transformer=aks_run_command_result_format)
g.custom_command('result', 'aks_command_result',
supports_no_wait=False, table_transformer=aks_run_command_result_format)
with self.command_group('aks maintenanceconfiguration', maintenance_configuration_sdk, client_factory=cf_maintenance_configurations) as g:
g.custom_command('list', 'aks_maintenanceconfiguration_list')
g.custom_show_command('show', 'aks_maintenanceconfiguration_show')
g.custom_command('add', 'aks_maintenanceconfiguration_add')
g.custom_command('update', 'aks_maintenanceconfiguration_update')
g.custom_command('delete', 'aks_maintenanceconfiguration_delete')
with self.command_group('aks nodepool', agent_pools_sdk, client_factory=cf_agent_pools) as g:
g.custom_command('list', 'aks_agentpool_list',
table_transformer=aks_agentpool_list_table_format)
g.custom_show_command('show', 'aks_agentpool_show',
table_transformer=aks_agentpool_show_table_format)
g.custom_command('add', 'aks_agentpool_add', supports_no_wait=True)
g.custom_command('scale', 'aks_agentpool_scale', supports_no_wait=True)
g.custom_command('upgrade', 'aks_agentpool_upgrade',
supports_no_wait=True)
g.custom_command('update', 'aks_agentpool_update',
supports_no_wait=True)
g.custom_command('delete', 'aks_agentpool_delete',
supports_no_wait=True)
g.custom_command('get-upgrades', 'aks_agentpool_get_upgrade_profile')
with self.command_group('aks pod-identity', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('add', 'aks_pod_identity_add')
g.custom_command('delete', 'aks_pod_identity_delete')
g.custom_command('list', 'aks_pod_identity_list',
table_transformer=aks_pod_identities_table_format)
with self.command_group('aks pod-identity exception', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('add', 'aks_pod_identity_exception_add')
g.custom_command('delete', 'aks_pod_identity_exception_delete')
g.custom_command('update', 'aks_pod_identity_exception_update')
g.custom_command('list', 'aks_pod_identity_exception_list',
table_transformer=aks_pod_identity_exceptions_table_format)
with self.command_group('aks egress-endpoints', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('list', 'aks_egress_endpoints_list')
| true | true |
1c4af53588d57359ae6d05b519a1dc4f2f7ca79b | 1,981 | py | Python | ampel/mongo/query/general.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/mongo/query/general.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/mongo/query/general.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/mongo/query/general.py
# License: BSD-3-Clause
# Author: valery brinnel <[email protected]>
# Date: 11.12.2019
# Last Modified Date: 17.02.2021
# Last Modified By: valery brinnel <[email protected]>
from bson.int64 import Int64
from typing import Any, Literal
from ampel.types import Tag, ChannelId, StockId, StrictIterable
from ampel.model.operator.AnyOf import AnyOf
from ampel.model.operator.AllOf import AllOf
from ampel.model.operator.OneOf import OneOf
from ampel.mongo.utils import maybe_match_array
from ampel.mongo.schema import apply_schema, apply_excl_schema
type_stock_id = (int, Int64, bytes, str)
def build_general_query(
stock: None | StockId | StrictIterable[StockId] = None,
channel: None | ChannelId | dict | AllOf[ChannelId] | AnyOf[ChannelId] | OneOf[ChannelId] = None,
tag: None | dict[Literal['with', 'without'], Tag | dict | AllOf[Tag] | AnyOf[Tag] | OneOf[Tag]] = None
) -> dict[str, Any]:
"""
Builds a query usable with the ampel "stock", "t0" (with channel=None), "t1" and "t2" collections
:param stock: matching multiple ids with a single query is possible
:param channel: None (no criterium) means all channel are considered.
:param tag: tags to be (or not to be) matched by query
:returns: query dict with matching criteria
:raises ValueError: apply_schema can raise ValueError in case the provided dict schema structure is unsupported
"""
query = {}
if stock:
query['stock'] = stock if isinstance(stock, type_stock_id) \
else maybe_match_array(stock) # type: ignore[arg-type]
if channel:
apply_schema(query, 'channel', channel)
if tag:
if 'with' in tag:
apply_schema(query, 'tag', tag['with'])
# Order matters, parse_dict(...) must be called *after* parse_excl_dict(...)
if 'without' in tag:
apply_excl_schema(query, 'tag', tag['without'])
return query
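# --- Illustrative usage (not part of the original module) ---
# Minimal sketch of calling the helper; the exact matching documents produced for
# 'channel' and for multiple stock ids depend on apply_schema / maybe_match_array.
if __name__ == '__main__':
	example = build_general_query(stock=[10, 11], channel='MY_CHANNEL')
	print(example)  # e.g. {'stock': {'$in': [10, 11]}, 'channel': 'MY_CHANNEL'} (illustrative)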
| 36.018182 | 112 | 0.710752 |
from bson.int64 import Int64
from typing import Any, Literal
from ampel.types import Tag, ChannelId, StockId, StrictIterable
from ampel.model.operator.AnyOf import AnyOf
from ampel.model.operator.AllOf import AllOf
from ampel.model.operator.OneOf import OneOf
from ampel.mongo.utils import maybe_match_array
from ampel.mongo.schema import apply_schema, apply_excl_schema
type_stock_id = (int, Int64, bytes, str)
def build_general_query(
stock: None | StockId | StrictIterable[StockId] = None,
channel: None | ChannelId | dict | AllOf[ChannelId] | AnyOf[ChannelId] | OneOf[ChannelId] = None,
tag: None | dict[Literal['with', 'without'], Tag | dict | AllOf[Tag] | AnyOf[Tag] | OneOf[Tag]] = None
) -> dict[str, Any]:
query = {}
if stock:
query['stock'] = stock if isinstance(stock, type_stock_id) \
else maybe_match_array(stock)
if channel:
apply_schema(query, 'channel', channel)
if tag:
if 'with' in tag:
apply_schema(query, 'tag', tag['with'])
if 'without' in tag:
apply_excl_schema(query, 'tag', tag['without'])
return query
| true | true |
1c4af5509e74f37a845df6918bcfcc8286f347c3 | 307 | py | Python | PycharmProjects/PythonExercicios/ex030.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercicios/ex030.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercicios/ex030.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | #Crie um programa que leia um número inteiro e mostre na tela se ele é PAR ou ÍMPAR
num = input('Digite um número inteiro, e eu vou lhe dizer se ele é par ou impar: ').strip()
num = int(num)
if num % 2 == 0:
print(f'O número {num} é um número par.')
else:
print(f'O número {num} é um número impar.') | 43.857143 | 91 | 0.674267 | num = input('Digite um número inteiro, e eu vou lhe dizer se ele é par ou impar: ').strip()
num = int(num)
if num % 2 == 0:
print(f'O número {num} é um número par.')
else:
print(f'O número {num} é um número impar.') | true | true |
1c4af5a3dd37ea59624418f4740c2e857872d948 | 9,018 | py | Python | backend/cookie/cookie/config/settings/production.py | NehemiasEC/BS | 7ce92a4efd7522f2ee7b35cce7620c3d125510ca | [
"MIT"
] | 1 | 2020-05-27T03:56:02.000Z | 2020-05-27T03:56:02.000Z | backend/cookie/cookie/config/settings/production.py | NehemiasEC/BS | 7ce92a4efd7522f2ee7b35cce7620c3d125510ca | [
"MIT"
] | 21 | 2020-05-28T06:23:37.000Z | 2022-03-12T00:51:49.000Z | backend/cookie/cookie/config/settings/production.py | NehemiasEC/BS | 7ce92a4efd7522f2ee7b35cce7620c3d125510ca | [
"MIT"
] | null | null | null | import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "cookie.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="cookie <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[cookie]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/sendinblue/
EMAIL_BACKEND = "anymail.backends.sendinblue.EmailBackend"
ANYMAIL = {
"SENDINBLUE_API_KEY": env("SENDINBLUE_API_KEY"),
"SENDINBLUE_API_URL": env(
"SENDINBLUE_API_URL", default="https://api.sendinblue.com/v3/"
),
}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL # noqa F405
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True # Offline compression is required when using Whitenoise
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_FILTERS
COMPRESS_FILTERS = {
"css": [
"compressor.filters.css_default.CssAbsoluteFilter",
"compressor.filters.cssmin.rCSSMinFilter",
],
"js": ["compressor.filters.jsmin.JSMinFilter"],
}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
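# With the LoggingIntegration above, logger.error(...) calls already become
# Sentry events and lower levels become breadcrumbs; the sketch below
# (hypothetical user id) shows the manual API for attaching context and
# reporting a handled exception.
def _example_report_handled_error(user_id="42"):
    import sentry_sdk

    sentry_sdk.set_user({"id": user_id})
    try:
        1 / 0
    except ZeroDivisionError as exc:
        sentry_sdk.capture_exception(exc)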
# Your stuff...
# ------------------------------------------------------------------------------