Dataset row schema (one pipe-separated row per source file):
hexsha (string, 40) | size (int64, 2 to 1.02M) | ext (string, 10 classes) | lang (string, 1 value) |
max_stars_repo_path (string, 4 to 245) | max_stars_repo_name (string, 6 to 130) | max_stars_repo_head_hexsha (string, 40) | max_stars_repo_licenses (list, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) |
max_issues_repo_path (string, 4 to 245) | max_issues_repo_name (string, 6 to 130) | max_issues_repo_head_hexsha (string, 40) | max_issues_repo_licenses (list, 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) |
max_forks_repo_path (string, 4 to 245) | max_forks_repo_name (string, 6 to 130) | max_forks_repo_head_hexsha (string, 40) | max_forks_repo_licenses (list, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) |
content (string, 2 to 1.02M) | avg_line_length (float64, 1 to 958k) | max_line_length (int64, 1 to 987k) | alphanum_fraction (float64, 0 to 1) | content_no_comment (string, 0 to 1.01M) | is_comment_constant_removed (bool, 2 classes) | is_sharp_comment_removed (bool, 1 class)
1c49cc9c62f8a2e1acc97ec88014ca7f54dbd2f8 | size: 861 | ext: py | lang: Python | path: tests/test_settings.py | repo: ibqn/django-graphql-jwt @ dd92319071092bb517187904f3ac0610e8443edf | licenses: ["MIT"] | stars: 1 (2019-06-19T12:05:08.000Z) | issues: 1 (2018-11-01T05:12:50.000Z) | forks: 1 (2021-03-10T17:53:41.000Z)
from datetime import timedelta
from django.test import TestCase
from graphql_jwt import settings
class SettingsTests(TestCase):
def test_perform_import(self):
f = settings.perform_import(id, '')
self.assertEqual(f, id)
f = settings.perform_import('datetime.timedelta', '')
self.assertEqual(f, timedelta)
def test_import_from_string_error(self):
with self.assertRaises(ImportError):
settings.import_from_string('import.error', '')
def test_reload_settings(self):
getattr(settings.jwt_settings, 'JWT_ALGORITHM')
settings.reload_settings(setting='TEST')
self.assertTrue(settings.jwt_settings._cached_attrs)
delattr(settings.jwt_settings, '_user_settings')
settings.jwt_settings.reload()
self.assertFalse(settings.jwt_settings._cached_attrs)
| avg_line_length: 27.774194 | max_line_length: 61 | alphanum_fraction: 0.70964 |
1c49cd273a7d8c3849364cca03ddab157ababced | size: 196 | ext: py | lang: Python | path: _modules/_site/week_5.py | repo: CoffeePoweredComputers/489-data-structures @ e2a56f3cecde42bf1520030e24750cbe17e1b704 | licenses: ["MIT"] | stars: null | issues: null | forks: null
Sep 19
:
Sep 20
:
**Lecture**{: .label .label-light-blue} X
Sep 21
:
Sep 22
: **Lecture**
Sep 23
: **zyBooks**{: .label .label-orange}
Sep 24
: **Nothing Due**
Sep 25
: **Nothing Due**
| avg_line_length: 8.521739 | max_line_length: 41 | alphanum_fraction: 0.571429 |
1c49cd379a153b4789e7d9393eb5505762cb9e05 | size: 576 | ext: py | lang: Python | path: services/models/mnemosyne.py | repo: life-game-player/Hephaestus @ 0c695193d8d2d8c70061e2e26ec8c718544342c6 | licenses: ["MIT"] | stars: null | issues: null | forks: null
import torch
def create(
host, user, passwd,
module, operator, operation, result
):
"""
Operation:
1: Create
2: Modify
3: Query
4: Delete
Result:
0: Succeeded
1: Failed
"""
conn = torch.connect(host, user, passwd, 'hephaestus')
list_sql = list()
list_sql.append(
"INSERT INTO mnemosyne(module, operator, operation, result) "
"VALUES('{}', {}, {}, {})".format(module, operator, operation, result)
)
torch.execute_list(conn, list_sql)
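if __name__ == '__main__':
    # Illustrative call sketch for create() above: log a successful "Create"
    # (operation 1, result 0) performed by operator 42. Connection values are
    # placeholders, and "torch" here appears to be the project's own database
    # helper module (it exposes connect/execute_list), not PyTorch.
    create(
        host='db.example.local', user='hephaestus', passwd='secret',
        module='players', operator=42, operation=1, result=0,
    )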
| avg_line_length: 22.153846 | max_line_length: 78 | alphanum_fraction: 0.53125 |
1c49ce60fe551913416a74e91ae623292230edb4 | size: 2,260 | ext: py | lang: Python | path: bclearer_boson_1_1_source/b_code/configurations/getters/boson_1_2e_k_configuration_getter_separate_names_and_instances.py | repo: boro-alpha/bclearer_boson_1_1 @ 15207d240fd3144b155922dc5c5d14822023026a | licenses: ["MIT"] | stars: 1 (2021-07-20T15:48:58.000Z) | issues: null | forks: null
from bclearer_boson_1_1_source.b_code.common_knowledge.inspire_matched_ea_objects import InspireMatchedEaObjects
from bclearer_source.b_code.common_knowledge.convention_shift_operation_types import ConventionShiftOperationTypes
from bclearer_source.b_code.configurations.bespoke_name_to_instance_configuration_objects import BespokeNameToInstanceConfigurationObjects
from bclearer_source.b_code.configurations.convention_shift_operation_configurations import ConventionShiftOperationConfigurations
def get_boson_1_2e_k1_configuration_separate_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_STANDARD_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k1_output_sep_standard_instances',
package_name='2e_k1_new_objects_sep_standard_instances')
return \
convention_shift_operation_configuration
def get_boson_1_2e_k2_configuration_bespoke_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
list_of_configuration_objects = \
[
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.IDENTIFIER,
name_instance_attribute_name=InspireMatchedEaObjects.LOCAL_ID_ATTRIBUTE.object_name,
package_name='2e_k2_new_objects_sep_bespoke_instances'),
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.GEOGRAPHICAL_NAME,
matched_name_instance_type=InspireMatchedEaObjects.SPELLING_OF_NAME)
]
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_BESPOKE_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k2_output_sep_bespoke_instances',
list_of_configuration_objects=list_of_configuration_objects,
package_name='2e_k2_new_objects_sep_bespoke_instances')
return \
convention_shift_operation_configuration
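if __name__ == '__main__':
    # Illustrative consumption sketch (the surrounding bclearer pipeline is assumed,
    # not shown): each getter above is a plain factory function that returns a
    # populated ConventionShiftOperationConfigurations instance.
    k1_configuration = get_boson_1_2e_k1_configuration_separate_standard_names_and_instances()
    k2_configuration = get_boson_1_2e_k2_configuration_bespoke_standard_names_and_instances()
    print(k1_configuration, k2_configuration)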
| avg_line_length: 55.121951 | max_line_length: 138 | alphanum_fraction: 0.815044 |
1c49cebb0a5ba4d641d71c1a1b47be7267c38f1c | size: 13,667 | ext: py | lang: Python | path: LPIPSmodels/dist_model.py | repo: HERMINDERSINGH1234/ML_Extra_Resolution_Increases @ 1fefceeab83f03fa8194cb63f78c5dbf7e90aeae | licenses: ["Apache-2.0"] | stars: 1 (2021-07-17T10:13:10.000Z) | issues: null | forks: null
from __future__ import absolute_import
import sys
sys.path.append('..')
sys.path.append('.')
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from IPython import embed
from . import networks_basic as networks
from . import util
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):
'''
INPUTS
model - ['net-lin'] for linearly calibrated network
['net'] for off-the-shelf network
['L2'] for L2 distance in Lab colorspace
['SSIM'] for ssim in RGB colorspace
net - ['squeeze','alex','vgg']
model_path - if None, will look in weights/[NET_NAME].pth
colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
use_gpu - bool - whether or not to use a GPU
printNet - bool - whether or not to print network architecture out
spatial - bool - whether to output an array containing varying distances across spatial dimensions
spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
is_train - bool - [True] for training mode
lr - float - initial learning rate
beta1 - float - initial momentum term for adam
version - 0.1 for latest, 0.0 was original
'''
BaseModel.initialize(self, use_gpu=use_gpu)
self.model = model
self.net = net
self.use_gpu = use_gpu
self.is_train = is_train
self.spatial = spatial
self.spatial_shape = spatial_shape
self.spatial_order = spatial_order
self.spatial_factor = spatial_factor
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'): # pretrained net + linear layer
self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
# model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net)
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw))
elif(self.model=='net'): # pretrained network
assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'
self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)
self.is_fake_net = True
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train: # training mode
# extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)
self.parameters+=self.rankLoss.parameters
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else: # test mode
self.net.eval()
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward_pair(self,in1,in2,retPerLayer=False):
if(retPerLayer):
return self.net.forward(in1,in2, retPerLayer=True)
else:
return self.net.forward(in1,in2)
def forward(self, in0, in1, retNumpy=True):
''' Function computes the distance between image patches in0 and in1
INPUTS
in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array
OUTPUT
computed distances between in0 and in1
'''
self.input_ref = in0
self.input_p0 = in1
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.loss_total = self.d0
def convert_output(d0):
if(retNumpy):
ans = d0.cpu().data.numpy()
if not self.spatial:
ans = ans.flatten()
else:
assert(ans.shape[0] == 1 and len(ans.shape) == 4)
return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)
return ans
else:
return d0
if self.spatial:
L = [convert_output(x) for x in self.d0]
spatial_shape = self.spatial_shape
if spatial_shape is None:
if(self.spatial_factor is None):
spatial_shape = (in0.size()[2],in0.size()[3])
else:
spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)
L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]
L = np.mean(np.concatenate(L, 2) * len(L), 2)
return L
else:
return convert_output(self.d0)
# ***** TRAINING FUNCTIONS *****
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.input_p1 = self.input_p1.cuda()
self.input_judge = self.input_judge.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self): # run forward pass
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.d1 = self.forward_pair(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
# var_judge
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
''' d0, d1 are Variables, judge is a Tensor '''
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))
self.old_lr = lr
def score_2afc_dataset(data_loader,func):
''' Function computes Two Alternative Forced Choice (2AFC) score using
distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return numpy array of length N
OUTPUTS
[0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
[1] - dictionary with following elements
d0s,d1s - N arrays containing distances between reference patch to perturbed patches
gts - N array in [0,1], preferred patch selected by human evaluators
(closer to "0" for left patch p0, "1" for right patch p1,
"0.6" means 60pct people preferred right patch, 40pct preferred left)
scores - N array in [0,1], corresponding to what percentage function agreed with humans
CONSTS
N - number of test triplets in data_loader
'''
d0s = []
d1s = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
d0s+=func(data['ref'],data['p0']).tolist()
d1s+=func(data['ref'],data['p1']).tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
# bar.update(i)
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader,func):
''' Function computes JND score using distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return numpy array of length N
OUTPUTS
[0] - JND score in [0,1], mAP score (area under precision-recall curve)
[1] - dictionary with following elements
ds - N array containing distances between two patches shown to human evaluator
sames - N array containing fraction of people who thought the two patches were identical
CONSTS
N - number of test triplets in data_loader
'''
ds = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
ds+=func(data['p0'],data['p1']).tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
# bar.update(i)
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
return(score, dict(ds=ds,sames=sames))
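if __name__ == '__main__':
    # Illustrative usage sketch for DistModel above. Inputs are Nx3xHxW tensors scaled
    # to [-1, 1], as the forward() docstring requires; 'net-lin' + 'alex' loads
    # v0.1/alex.pth next to this module, per initialize() above. Values below are
    # placeholders, not part of the original module.
    model = DistModel()
    model.initialize(model='net-lin', net='alex', use_gpu=False)
    img0 = torch.zeros(1, 3, 64, 64)             # reference patch in [-1, 1]
    img1 = torch.rand(1, 3, 64, 64) * 2.0 - 1.0  # distorted patch in [-1, 1]
    print(model.forward(img0, img1))             # length-1 numpy array of distances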
| avg_line_length: 41.795107 | max_line_length: 279 | alphanum_fraction: 0.599693 |
1c49cf543130427f0a2e98a414b5ce3c3321b3df | size: 6,004 | ext: py | lang: Python | path: sdk/python/pulumi_azure_nextgen/azurestack/latest/customer_subscription.py | repo: pulumi/pulumi-azure-nextgen @ 452736b0a1cf584c2d4c04666e017af6e9b2c15c | licenses: ["Apache-2.0"] | stars: 31 (2020-09-21T09:41:01.000Z to 2021-02-26T13:21:59.000Z) | issues: 231 (2020-09-21T09:38:45.000Z to 2021-03-01T11:16:03.000Z) | forks: 4 (2020-09-29T14:14:59.000Z to 2021-02-10T20:38:16.000Z)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CustomerSubscription']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
class CustomerSubscription(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_subscription_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
registration_name: Optional[pulumi.Input[str]] = None,
resource_group: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Customer subscription.
Latest API Version: 2017-06-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] customer_subscription_name: Name of the product.
:param pulumi.Input[str] etag: The entity tag used for optimistic concurrency when modifying the resource.
:param pulumi.Input[str] registration_name: Name of the Azure Stack registration.
:param pulumi.Input[str] resource_group: Name of the resource group.
:param pulumi.Input[str] tenant_id: Tenant Id.
"""
pulumi.log.warn("CustomerSubscription is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['customer_subscription_name'] = customer_subscription_name
__props__['etag'] = etag
if registration_name is None and not opts.urn:
raise TypeError("Missing required property 'registration_name'")
__props__['registration_name'] = registration_name
if resource_group is None and not opts.urn:
raise TypeError("Missing required property 'resource_group'")
__props__['resource_group'] = resource_group
__props__['tenant_id'] = tenant_id
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:azurestack:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20170601:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20200601preview:CustomerSubscription")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CustomerSubscription, __self__).__init__(
'azure-nextgen:azurestack/latest:CustomerSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CustomerSubscription':
"""
Get an existing CustomerSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CustomerSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The entity tag used for optimistic concurrency when modifying the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Tenant Id.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of Resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
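if __name__ == '__main__':
    # Illustrative usage sketch, not part of the generated module. Names and IDs are
    # placeholders and a configured Pulumi/Azure stack is assumed; registration_name
    # and resource_group are the two required inputs enforced in __init__ above.
    example = CustomerSubscription(
        "example-customer-subscription",
        registration_name="myRegistration",
        resource_group="azurestack-rg",
        customer_subscription_name="customer-1",
        tenant_id="00000000-0000-0000-0000-000000000000",
    )
    pulumi.export("subscription_name", example.name)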
| avg_line_length: 44.474074 | max_line_length: 287 | alphanum_fraction: 0.659893 |
1c49cfd95d576b090d11ac58239cd27ff7c71312 | size: 553 | ext: py | lang: Python | path: scripts/RunServer.py | repo: ekg/shasta @ e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: null
#!/usr/bin/python3
import os
import shasta
import GetConfig
# Find the path to the docs directory.
thisScriptPath = os.path.realpath(__file__)
thisScriptDirectory = os.path.dirname(thisScriptPath)
thisScriptParentDirectory = os.path.dirname(thisScriptDirectory)
docsDirectory = thisScriptParentDirectory + '/docs'
# Read the config file.
config = GetConfig.getConfig()
# Initialize the assembler.
a = shasta.Assembler()
a.accessAllSoft()
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
a.setDocsDirectory(docsDirectory)
a.explore()
| avg_line_length: 22.12 | max_line_length: 64 | alphanum_fraction: 0.793852 |
1c49d18f431832a15fe255123e31a03b0a805e27 | size: 5,159 | ext: py | lang: Python | path: settings.py | repo: garyp/djwed @ a3cecfa77f55574fecc05621a33d0cdd20a85fb2 | licenses: ["MIT"] | stars: 1 (2021-01-27T09:56:00.000Z) | issues: null | forks: null
# Django settings for djwed project.
import logging
from socket import gethostname
production = False
# This is the production hostname (as I'd develop on a desktop and then run
# the system on a colo server.)
if 'my-colo-server' == gethostname().split(".")[0]:
production = True
if not production:
DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
logging.basicConfig(level=logging.INFO)
ADMINS = (
('Ben Bitdiddle', '[email protected]'),
)
MANAGERS = (
('Ben Bitdiddle', '[email protected]'),
('Alyssa P Hacker', '[email protected]'),
)
# Default From email address used by djwed when sending emails, with a display
# name as well
FROM_EMAIL = ('Alyssa & Ben', '[email protected]')
# Default From email address used by Django when emailing errors and
# notifications
DEFAULT_FROM_EMAIL = SERVER_EMAIL = FROM_EMAIL[1]
WEDDING_NAMES = 'Alyssa and Ben'
# By keeping the database in a sqlite3 file, I was able to check it into
# subversion and easily make copies to the staging environment.
# The performance of sqlite3 was just fine for a single-wedding environment
# as this was intended for.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# Path to base of code, as well to database file if using sqlite3.
# Change these file paths as appropriate.
if production:
WEDDING_BASE = '/www/wedding'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata.sqlite'
else:
WEDDING_BASE = '/u/media/project/wedding/website/djwed'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata-test.sqlite'
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'localhost'
if production:
SEND_EMAIL = True
EMAIL_PORT = 25
else:
SEND_EMAIL = False
EMAIL_PORT = 1025
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Authentication backend classes (as strings) to use when attempting to authenticate a user.
AUTHENTICATION_BACKENDS = (
'djwed.wedding.auth.InviteeAuthBackend',
'django.contrib.auth.backends.ModelBackend'
)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = WEDDING_BASE + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
if production:
MEDIA_URL = 'http://wedding.example.org/media/'
else:
MEDIA_URL = 'http://localhost:8000/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'CHANGE_ME_TO_SOME_OTHER_RANDOM_STRING!!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# Needs to be at the end
'django.contrib.csrf.middleware.CsrfMiddleware',
)
ROOT_URLCONF = 'djwed.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
WEDDING_BASE + '/templates/',
WEDDING_BASE + '/photologue/templates/photologue/templates/',
WEDDING_BASE + '/photologue/templates/',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
#"django.contrib.messages.context_processors.messages"
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'djwed.wedding',
'tagging',
'photologue',
'south',
)
| avg_line_length: 32.043478 | max_line_length: 101 | alphanum_fraction: 0.724559 |
1c49d22b406a20082fc4ddf2a42ae03c8ebb4cf3 | size: 1,325 | ext: py | lang: Python | path: spanner/google/cloud/spanner.py | repo: bomboradata/bombora-google-cloud-python @ 255bbebe6c50490f40fcc3eed40bae1e77e03859 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Spanner API package."""
from __future__ import absolute_import
from google.cloud.spanner_v1 import __version__
from google.cloud.spanner_v1 import AbstractSessionPool
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import enums
from google.cloud.spanner_v1 import FixedSizePool
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import types
__all__ = (
'__version__',
'AbstractSessionPool',
'BurstyPool',
'Client',
'enums',
'FixedSizePool',
'KeyRange',
'KeySet',
'param_types',
'types',
)
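if __name__ == '__main__':
    # Illustrative consumption sketch of the names re-exported above. A GCP project and
    # application default credentials are assumed to be available in the environment;
    # this is a sketch, not part of the original package module.
    client = Client()
    keyset = KeySet(all_=True)
    print(client, keyset, param_types.STRING)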
| avg_line_length: 30.813953 | max_line_length: 74 | alphanum_fraction: 0.767547 |
1c49d2430b558f5439579e9257bf1022eef95d92 | size: 583 | ext: py | lang: Python | path: script.plexodus/service.py | repo: MR-Unknown-Cm/addons @ 8df1ebe58c95620bb02a05dbae7bf37954915cbd | licenses: ["Apache-2.0"] | stars: 1 (2020-03-03T10:01:21.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*-
import xbmc
import xbmcgui
import xbmcaddon
def main():
if xbmc.getInfoLabel('Window(10000).Property(script.plexodus.service.started)'):
# Prevent add-on updates from starting a new version of the addon
return
xbmcgui.Window(10000).setProperty('script.plexodus.service.started', '1')
if xbmcaddon.Addon().getSetting('kiosk.mode') == 'true':
xbmc.log('script.plexodus: Starting from service (Kiosk Mode)', xbmc.LOGNOTICE)
xbmc.executebuiltin('RunScript(script.plexodus)')
if __name__ == '__main__':
main()
| avg_line_length: 27.761905 | max_line_length: 87 | alphanum_fraction: 0.686106 |
1c49d2ea6b5f5d7265c638d9c946745c6efc2c2a | size: 1,696 | ext: py | lang: Python | path: examples/p3/Ball.py | repo: djpeach/pygamer @ 77a0cdab58bc29d06cc88c8cc823850794fe0bf0 | licenses: ["MIT"] | stars: null | issues: null | forks: null
import pygame
import pygamer
import time
class Ball(pygamer.Object):
def __init__(self, speed, color, radius, center_position):
rect = pygame.rect.Rect(center_position[0] - radius, center_position[1] - radius, radius * 2, radius * 2)
super().__init__(rect, speed)
self.color = color
self.radius = radius
self.diameter = radius * 2
self.initital_position = center_position
self.scored = False
def check_bounds(self, screen):
x, y = self.speed
if self.left < screen.left:
self.rect = pygame.rect.Rect(0, self.top, self.width, self.height)
self.speed = (0, 0)
self.scored = True
elif self.right > screen.right:
self.rect = pygame.rect.Rect(screen.right - self.width, self.top, self.width, self.height)
self.speed = (0, 0)
self.scored = True
elif self.top < screen.top:
self.rect = pygame.rect.Rect(self.left, 0, self.width, self.height)
self.speed = (x, -y)
elif self.bottom > screen.bottom:
self.rect = pygame.rect.Rect(self.left, screen.bottom - self.height, self.width, self.height)
self.speed = (x, -y)
def check_paddle_collisions(self, paddle, reset_x):
x, y = self.speed
if paddle.rect.colliderect(self):
if abs(x) < 15:
x *= 1.3
self.rect = pygame.rect.Rect(reset_x, self.top, self.width, self.height)
self.speed = (-x, y)
def draw(self, surface_to_draw_on):
pygame.draw.circle(surface_to_draw_on, self.color, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)
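if __name__ == '__main__':
    # Illustrative usage sketch: construct a ball for a pong-style loop. The per-frame
    # calls are left commented because they need pygamer screen/paddle objects (anything
    # exposing rect-style left/right/top/bottom bounds), which are assumed here.
    ball = Ball(speed=(4, 4), color=(255, 255, 255), radius=10, center_position=(320, 240))
    print(ball.color, ball.radius, ball.diameter)
    # ball.check_bounds(screen)
    # ball.check_paddle_collisions(left_paddle, reset_x=40)
    # ball.draw(display_surface)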
| avg_line_length: 39.44186 | max_line_length: 127 | alphanum_fraction: 0.598467 |
1c49d35a318a1a44c92df86786fbdadfd07a7a15 | 196 | py | Python | detectron/lib/python3.6/site-packages/torchvision/version.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/torchvision/version.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/torchvision/version.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | __version__ = '0.6.1'
git_version = '35d732ac53aebbed917993523d685b4cb09ef6ea'
from torchvision.extension import _check_cuda_version
if _check_cuda_version() > 0:
cuda = _check_cuda_version()
| 32.666667 | 56 | 0.811224 | __version__ = '0.6.1'
git_version = '35d732ac53aebbed917993523d685b4cb09ef6ea'
from torchvision.extension import _check_cuda_version
if _check_cuda_version() > 0:
cuda = _check_cuda_version()
| true | true |
1c49d3ed3d448eb8beffb819f40560927ff5b27b | 12,269 | py | Python | testtools/tests/test_run.py | sparkiegeek/testtools | f86658ac18521db4254e7292c4a4dda6017d930e | [
"MIT"
] | null | null | null | testtools/tests/test_run.py | sparkiegeek/testtools | f86658ac18521db4254e7292c4a4dda6017d930e | [
"MIT"
] | null | null | null | testtools/tests/test_run.py | sparkiegeek/testtools | f86658ac18521db4254e7292c4a4dda6017d930e | [
"MIT"
] | null | null | null | # Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for the test runner logic."""
import doctest
import io
from unittest import TestSuite
import sys
from textwrap import dedent
from extras import try_import
fixtures = try_import('fixtures')
testresources = try_import('testresources')
import unittest
import testtools
from testtools import TestCase, run, skipUnless
from testtools.compat import (
_b,
)
from testtools.matchers import (
Contains,
DocTestMatches,
MatchesRegex,
)
if fixtures:
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self, broken=False):
"""Create a SampleTestFixture.
:param broken: If True, the sample file will not be importable.
"""
if not broken:
init_contents = _b("""\
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")
else:
init_contents = b"class not in\n"
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', init_contents)])
def setUp(self):
super().setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
if fixtures and testresources:
class SampleResourcedFixture(fixtures.Fixture):
"""Creates a test suite that uses testresources."""
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'resourceexample', [('__init__.py', _b("""
from fixtures import Fixture
from testresources import (
FixtureResource,
OptimisingTestSuite,
ResourcedTestCase,
)
from testtools import TestCase
class Printer(Fixture):
def setUp(self):
super(Printer, self).setUp()
print('Setting up Printer')
def reset(self):
pass
class TestFoo(TestCase, ResourcedTestCase):
# When run, this will print just one Setting up Printer, unless the
# OptimisingTestSuite is not honoured, when one per test case will print.
resources=[('res', FixtureResource(Printer()))]
def test_bar(self):
pass
def test_foo(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(testtools.__path__.remove, self.package.base)
testtools.__path__.append(self.package.base)
if fixtures:
class SampleLoadTestsPackage(fixtures.Fixture):
"""Creates a test suite package using load_tests."""
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'discoverexample', [('__init__.py', _b("""
from testtools import TestCase, clone_test_with_new_id
class TestExample(TestCase):
def test_foo(self):
pass
def load_tests(loader, tests, pattern):
tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
return tests
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(sys.path.remove, self.package.base)
class TestRun(TestCase):
def setUp(self):
super().setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_run_custom_list(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}], tests)
def test_run_list_with_loader(self):
# list() is attempted with a loader first.
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test, loader=None):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
tests.append(loader)
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}, program.testLoader],
tests)
def test_run_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
try:
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_list_failed_import(self):
broken = self.useFixture(SampleTestFixture(broken=True))
out = io.StringIO()
# XXX: http://bugs.python.org/issue22811
unittest.defaultTestLoader._top_level_dir = None
exc = self.assertRaises(
SystemExit,
run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
self.assertEqual(2, exc.args[0])
self.assertThat(out.getvalue(), DocTestMatches("""\
unittest.loader._FailedTest.runexample
Failed to import test module: runexample
Traceback (most recent call last):
File ".../loader.py", line ..., in _find_test_path
package = self._get_module_from_name(name)
File ".../loader.py", line ..., in _get_module_from_name
__import__(name)
File ".../runexample/__init__.py", line 1
class not in
...^...
SyntaxError: invalid syntax
""", doctest.ELLIPSIS))
def test_run_orders_tests(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_load_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_load_list_preserves_custom_suites(self):
if testresources is None:
self.skipTest("Need testresources")
self.useFixture(SampleResourcedFixture())
# We load two tests, not loading one. Both share a resource, so we
# should see just one resource setup occur.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.resourceexample.TestFoo.test_bar
testtools.resourceexample.TestFoo.test_foo
"""))
finally:
f.close()
stdout = self.useFixture(fixtures.StringStream('stdout'))
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
try:
run.main(['prog', '--load-list', tempname,
'testtools.resourceexample.test_suite'], stdout.stream)
except SystemExit:
# Evil resides in TestProgram.
pass
out = stdout.getDetails()['stdout'].as_text()
self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
def test_run_failfast(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
self.fail('a')
def test_b(self):
self.fail('b')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
runner = run.TestToolsTestRunner(failfast=True)
runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
def test_run_locals(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
a = 1
self.fail('a')
runner = run.TestToolsTestRunner(tb_locals=True, stdout=stdout.stream)
runner.run(Failing('test_a'))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('a = 1'))
def test_stdout_honoured(self):
self.useFixture(SampleTestFixture())
tests = []
out = io.StringIO()
exc = self.assertRaises(SystemExit, run.main,
argv=['prog', 'testtools.runexample.test_suite'],
stdout=out)
self.assertEqual((0,), exc.args)
self.assertThat(
out.getvalue(),
MatchesRegex("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
"""))
@skipUnless(fixtures, "fixtures not present")
def test_issue_16662(self):
# unittest's discover implementation didn't handle load_tests on
# packages. That is fixed pending commit, but we want to offer it
# to all testtools users regardless of Python version.
# See http://bugs.python.org/issue16662
pkg = self.useFixture(SampleLoadTestsPackage())
out = io.StringIO()
# XXX: http://bugs.python.org/issue22811
unittest.defaultTestLoader._top_level_dir = None
self.assertEqual(None, run.main(
['prog', 'discover', '-l', pkg.package.base], out))
self.assertEqual(dedent("""\
discoverexample.TestExample.test_foo
fred
"""), out.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| 34.175487 | 83 | 0.615454 |
import doctest
import io
from unittest import TestSuite
import sys
from textwrap import dedent
from extras import try_import
fixtures = try_import('fixtures')
testresources = try_import('testresources')
import unittest
import testtools
from testtools import TestCase, run, skipUnless
from testtools.compat import (
_b,
)
from testtools.matchers import (
Contains,
DocTestMatches,
MatchesRegex,
)
if fixtures:
class SampleTestFixture(fixtures.Fixture):
def __init__(self, broken=False):
if not broken:
init_contents = _b("""\
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")
else:
init_contents = b"class not in\n"
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', init_contents)])
def setUp(self):
super().setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
if fixtures and testresources:
class SampleResourcedFixture(fixtures.Fixture):
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'resourceexample', [('__init__.py', _b("""
from fixtures import Fixture
from testresources import (
FixtureResource,
OptimisingTestSuite,
ResourcedTestCase,
)
from testtools import TestCase
class Printer(Fixture):
def setUp(self):
super(Printer, self).setUp()
print('Setting up Printer')
def reset(self):
pass
class TestFoo(TestCase, ResourcedTestCase):
# When run, this will print just one Setting up Printer, unless the
# OptimisingTestSuite is not honoured, when one per test case will print.
resources=[('res', FixtureResource(Printer()))]
def test_bar(self):
pass
def test_foo(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(testtools.__path__.remove, self.package.base)
testtools.__path__.append(self.package.base)
if fixtures:
class SampleLoadTestsPackage(fixtures.Fixture):
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'discoverexample', [('__init__.py', _b("""
from testtools import TestCase, clone_test_with_new_id
class TestExample(TestCase):
def test_foo(self):
pass
def load_tests(loader, tests, pattern):
tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
return tests
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(sys.path.remove, self.package.base)
class TestRun(TestCase):
def setUp(self):
super().setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_run_custom_list(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}], tests)
def test_run_list_with_loader(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test, loader=None):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
tests.append(loader)
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}, program.testLoader],
tests)
def test_run_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
try:
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_list_failed_import(self):
broken = self.useFixture(SampleTestFixture(broken=True))
out = io.StringIO()
unittest.defaultTestLoader._top_level_dir = None
exc = self.assertRaises(
SystemExit,
run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
self.assertEqual(2, exc.args[0])
self.assertThat(out.getvalue(), DocTestMatches("""\
unittest.loader._FailedTest.runexample
Failed to import test module: runexample
Traceback (most recent call last):
File ".../loader.py", line ..., in _find_test_path
package = self._get_module_from_name(name)
File ".../loader.py", line ..., in _get_module_from_name
__import__(name)
File ".../runexample/__init__.py", line 1
class not in
...^...
SyntaxError: invalid syntax
""", doctest.ELLIPSIS))
def test_run_orders_tests(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_load_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_load_list_preserves_custom_suites(self):
if testresources is None:
self.skipTest("Need testresources")
self.useFixture(SampleResourcedFixture())
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.resourceexample.TestFoo.test_bar
testtools.resourceexample.TestFoo.test_foo
"""))
finally:
f.close()
stdout = self.useFixture(fixtures.StringStream('stdout'))
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
try:
run.main(['prog', '--load-list', tempname,
'testtools.resourceexample.test_suite'], stdout.stream)
except SystemExit:
pass
out = stdout.getDetails()['stdout'].as_text()
self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
def test_run_failfast(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
self.fail('a')
def test_b(self):
self.fail('b')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
runner = run.TestToolsTestRunner(failfast=True)
runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
def test_run_locals(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
a = 1
self.fail('a')
runner = run.TestToolsTestRunner(tb_locals=True, stdout=stdout.stream)
runner.run(Failing('test_a'))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('a = 1'))
def test_stdout_honoured(self):
self.useFixture(SampleTestFixture())
tests = []
out = io.StringIO()
exc = self.assertRaises(SystemExit, run.main,
argv=['prog', 'testtools.runexample.test_suite'],
stdout=out)
self.assertEqual((0,), exc.args)
self.assertThat(
out.getvalue(),
MatchesRegex("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
"""))
@skipUnless(fixtures, "fixtures not present")
def test_issue_16662(self):
pkg = self.useFixture(SampleLoadTestsPackage())
out = io.StringIO()
unittest.defaultTestLoader._top_level_dir = None
self.assertEqual(None, run.main(
['prog', 'discover', '-l', pkg.package.base], out))
self.assertEqual(dedent("""\
discoverexample.TestExample.test_foo
fred
"""), out.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| true | true |
1c49d5612454adebb23817577e69371c43f1abab | 17,792 | py | Python | coap/coapTransmitter.py | TimothyClaeys/coap | 02487f06980c5c434fcf7efc0f04a97a081e1f13 | [
"BSD-3-Clause"
] | 53 | 2015-03-04T19:41:29.000Z | 2021-09-27T18:39:52.000Z | coap/coapTransmitter.py | TimothyClaeys/coap | 02487f06980c5c434fcf7efc0f04a97a081e1f13 | [
"BSD-3-Clause"
] | 7 | 2016-05-18T15:49:43.000Z | 2019-06-12T15:06:30.000Z | coap/coapTransmitter.py | TimothyClaeys/coap | 02487f06980c5c434fcf7efc0f04a97a081e1f13 | [
"BSD-3-Clause"
] | 57 | 2015-01-07T08:54:54.000Z | 2021-09-27T18:39:55.000Z | import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapTransmitter')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import random
from . import coapDefines as d
from . import coapException as e
from . import coapUtils as u
from . import coapMessage as m
class coapTransmitter(threading.Thread):
'''
\brief A class which takes care of transmitting a CoAP message.
It handles:
- waiting for an app-level reply, and
    - waiting for a transport-level ACK in case of a confirmable message.
The thread is ephemeral: it is created for each transmission, and becomes
inactive when the transmission is completed, or times out.
'''
# states of the finite state machine this class implements
STATE_INIT = 'INIT'
STATE_TXCON = 'TXCON'
STATE_TXNON = 'TXNON'
STATE_WAITFORACK = 'WAITFORACK'
STATE_ACKRX = 'ACKRX'
STATE_WAITFOREXPIRATIONMID = 'WAITFOREXPIRATIONMID'
STATE_WAITFORRESP = 'WAITFORRESP'
STATE_RESPRX = 'RESPRX'
STATE_TXACK = 'TXACK'
STATE_ALL = [
STATE_INIT,
STATE_TXCON,
STATE_TXNON,
STATE_WAITFORACK,
STATE_WAITFOREXPIRATIONMID,
STATE_WAITFORRESP,
STATE_TXACK,
]
def __init__(self,sendFunc,srcIp,srcPort,destIp,destPort,confirmable,messageId,code,token,options,payload,securityContext,requestSeq,ackTimeout,respTimeout,maxRetransmit):
'''
        \brief Initializer function.
This function initializes this instance by recording everything about
        the CoAP message to be exchanged with the remote endpoint. It does not,
however, initiate the exchange, which is done by calling the transmit()
method.
        \param[in] sendFunc    The function to call to send a CoAP message.
\param[in] srcIp The IP address of the local endpoint, a string of the
form 'aaaa::1'.
\param[in] srcport The UDP port the local endpoint is attached to, an
integer between 0x0000 and 0xffff.
\param[in] destIp The IP address of the remote CoAP endpoint, a
string of the form 'aaaa::1'.
\param[in] destPort The UDP port the remote endpoint is attached to, an
integer between 0x0000 and 0xffff.
\param[in] confirmable A boolean indicating whether the CoAP request is
            to be sent confirmable (True) or non-confirmable (False).
\param[in] messageId The message ID to be used for the CoAP request, an
integer. The caller of this function needs to enforce unicity rules
for the value passed.
\param[in] code The CoAP method to used in the request. Needs to a
value of METHOD_ALL.
\param[in] token The token to be used for this exchange. The caller
of this function needs to enforce unicity rules for the value
passed.
\param[in] options A list of CoAP options. Each element needs to be
an instance of the coapOption class. Note that this class will add
            appropriate CoAP options to encode the URI and query, if needed.
\param[in] payload The payload to pass in the CoAP request. This needs
            to be a byte list, i.e. a list of integers between 0x00 and 0xff.
This function does not parse this payload, which is written as-is
in the CoAP request.
\param[in] securityContext Security context used for protection of the request
\param[in] requestSeq OSCORE's sequence number from the request.
\param[in] ackTimeout The ACK timeout.
\param[in] respTimeout The app-level response timeout.
'''
# log
log.debug('creating instance')
# store params
self.sendFunc = sendFunc
self.srcIp = srcIp
self.srcPort = srcPort
self.destIp = destIp
self.destPort = destPort
self.confirmable = confirmable
self.messageId = messageId
self.code = code
self.token = token
self.options = options
self.payload = payload
self.securityContext = securityContext
self.requestSeq = requestSeq
self.maxRetransmit = maxRetransmit
# local variables
self.dataLock = threading.Lock() # lock access to internal state
self.fsmSem = threading.Lock() # trigger an FSM iteration
self.startLock = threading.Lock() # released to start communicating
self.endLock = threading.Lock() # released when done communicating
self.stateLock = threading.RLock() # busy setting or getting FSM state
self.rxMsgEvent = threading.Event()
self.receivedACK = None
self.receivedResp = None
self.coapResponse = None
self.coapError = None
self.state = self.STATE_INIT # current state of the FSM
self.numTxCON = 0
self.ackTimeout = ackTimeout
self.respTimeout = respTimeout
self.fsmGoOn = True
self.fsmAction = {
self.STATE_INIT: self._action_INIT,
self.STATE_TXCON: self._action_TXCON,
self.STATE_TXNON: self._action_TXNON,
self.STATE_WAITFORACK: self._action_WAITFORACK,
self.STATE_ACKRX: self._action_ACKRX,
self.STATE_WAITFOREXPIRATIONMID: self._action_WAITFOREXPIRATIONMID,
self.STATE_WAITFORRESP: self._action_WAITFORRESP,
self.STATE_RESPRX: self._action_RESPRX,
self.STATE_TXACK: self._action_TXACK,
}
# initialize parent
threading.Thread.__init__(self)
# give this thread a name
self.name = '[{0}]:{1}--m0x{2:x},0x{3:x}-->[{4}]:{5}'.format(
self.srcIp,
self.srcPort,
self.messageId,
self.token,
self.destIp,
self.destPort,
)
# by default, I'm not communicating
self.startLock.acquire()
self.endLock.acquire()
# start the thread's execution
self.start()
#======================== public ==========================================
def transmit(self):
'''
\brief Start the interaction with the destination, including waiting
for transport-level ACK (if needed), waiting for an app-level
response, and ACKing that (if needed)
This function blocks until a response is received, or the interaction
times out.
\raise coapTimeout When either no ACK is received in time (for
confirmable requests), or no application-level response is received.
\return The received response, already parsed.
'''
# log
log.debug('transmit()')
# start the thread's execution
self.startLock.release()
# wait for it to be done
self.endLock.acquire()
# raise an exception if went wrong, or return response
with self.dataLock:
if self.coapError:
assert not self.coapResponse
raise self.coapError #pylint: disable=E0702
if self.coapResponse:
assert not self.coapError
return self.coapResponse
raise SystemError('neither an error, nor a response')
def getState(self):
with self.stateLock:
return self.state
def receiveMessage(self, timestamp, srcIp, srcPort, message):
assert srcIp==self.destIp
assert srcPort==self.destPort
assert (message['token']==self.token) or (message['messageId']==self.messageId)
# log
log.debug('receiveMessage timestamp={0} srcIp={1} srcPort={2} message={3}'.format(timestamp,srcIp,srcPort,message))
# turn message into exception if needed
if message['code'] not in d.METHOD_ALL+d.COAP_RC_ALL_SUCCESS:
message = e.coapRcFactory(message['code'])
# store packet
with self.dataLock:
self.LastRxPacket = (timestamp,srcIp,srcPort,message)
# signal reception
self.rxMsgEvent.set()
#======================= private ==========================================
#===== fsm
def run(self):
try:
# wait for transmit() to be called
self.startLock.acquire()
# log
log.debug('start FSM')
while self.fsmGoOn:
# wait for the FSM to be kicked
self.fsmSem.acquire()
# log
log.debug('fsm state iteration: {0}'.format(self.getState()))
# call the appropriate action
self.fsmAction[self.getState()]()
# is interaction done?
with self.dataLock:
if self.coapError or self.coapResponse:
self.endLock.release()
self.fsmGoOn=False
except Exception as err:
log.critical(u.formatCrashMessage(
threadName = self.name,
error = err
)
)
def _action_INIT(self):
# log
log.debug('_action_INIT()')
# set state according to confirmable
if self.confirmable:
self._setState(self.STATE_TXCON)
else:
self._setState(self.STATE_TXNON)
# kick FSM
self._kickFsm()
def _action_TXCON(self):
# log
log.debug('_action_TXCON()')
# flag error if max number of CON transmits reached
if self.numTxCON>self.maxRetransmit+1:
# this is an error case
self.coapError = e.coapTimeout('No ACK received after {0} tries (max {1})'.format(
self.numTxCON,
self.maxRetransmit+1,
)
)
return
# build message
message = m.buildMessage(
msgtype = d.TYPE_CON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# increment number of transmitted messages
self.numTxCON += 1
# update FSM state
self._setState(self.STATE_WAITFORACK)
# kick FSM
self._kickFsm()
def _action_TXNON(self):
# log
log.debug('_action_TXNON()')
# build message
message = m.buildMessage(
msgtype = d.TYPE_NON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
def _action_WAITFORACK(self):
# log
log.debug('_action_WAITFORACK()')
startTime = time.time()
ackMaxWait = self.ackTimeout*random.uniform(1, d.DFLT_ACK_RANDOM_FACTOR)
while True:
waitTimeLeft = startTime+ackMaxWait-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
message['type']==d.TYPE_ACK and
message['messageId']==self.messageId
):
# store ACK
with self.dataLock:
self.receivedACK = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_ACKRX)
# kick FSM
self._kickFsm()
return
else:
# re-send
# update FSM state
self._setState(self.STATE_TXCON)
# kick FSM
self._kickFsm()
return
def _action_ACKRX(self):
# log
log.debug('_action_ACKRX()')
with self.dataLock:
assert self.receivedACK
(timestamp,srcIp,srcPort,message) = self.receivedACK
if message['code']==d.COAP_RC_NONE:
# response NOT piggybacked
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
else:
# piggybacked response
# successful end of FSM
with self.dataLock:
self.coapResponse = message
def _action_WAITFOREXPIRATIONMID(self):
# log
log.debug('_action_WAITFOREXPIRATIONMID()')
raise NotImplementedError()
def _action_WAITFORRESP(self):
# log
log.debug('_action_WAITFORRESP()')
startTime = time.time()
while True:
waitTimeLeft = startTime+self.respTimeout-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
(
message['type']==d.TYPE_CON or
message['type']==d.TYPE_NON
) and
message['token']==self.token
):
# store response
with self.dataLock:
self.receivedResp = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_RESPRX)
# kick FSM
self._kickFsm()
return
else:
# this is an error case
self.coapError = e.coapTimeout('No Response received after {0}s'.format(
self.respTimeout,
)
)
return
def _action_RESPRX(self):
# log
log.debug('_action_RESPRX()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# decide whether to ACK response
if message['type']==d.TYPE_CON:
self._setState(self.STATE_TXACK)
elif message['type']==d.TYPE_NON:
# successful end of FSM
with self.dataLock:
self.coapResponse = message
else:
raise SystemError('unexpected message type {0}'.format(message['type']))
# kick FSM
self._kickFsm()
def _action_TXACK(self):
# log
log.debug('_action_TXACK()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# build ACK
message = m.buildMessage(
msgtype = d.TYPE_ACK,
token = None,
code = d.COAP_RC_NONE,
messageId = message['messageId'],
)
# send
self.sendFunc(
destIp = message['srcId'], #pylint: disable=E1126
destPort = message['srcPort'], #pylint: disable=E1126
msg = message,
)
# successful end of FSM
with self.dataLock:
self.coapResponse = message
# kick FSM
self._kickFsm()
#===== helpers
def _kickFsm(self):
self.fsmSem.release()
def _setState(self,newState):
with self.stateLock:
self.state = newState
log.debug('{0}: state={1}'.format(self.name,newState))
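# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how the owning coap object typically drives this class:
# one ephemeral coapTransmitter is created per outgoing request, and
# transmit() blocks until receiveMessage() feeds in a matching ACK/response
# or the exchange times out.  The send function, addresses and numeric values
# below are made-up placeholders, and the construction/transmit() calls are
# left commented out because they only make sense inside a full CoAP stack.
if __name__ == '__main__':
    def example_send(destIp, destPort, msg):
        # A real send function would serialize msg onto a UDP socket.
        print('would send {0} bytes to [{1}]:{2}'.format(len(msg), destIp, destPort))
    # transmitter = coapTransmitter(
    #     sendFunc=example_send, srcIp='::1', srcPort=5683,
    #     destIp='::1', destPort=5683, confirmable=True,
    #     messageId=0x1234, code=1,            # 1 is the CoAP GET method code
    #     token=0x56, options=[], payload=[],
    #     securityContext=None, requestSeq=None,
    #     ackTimeout=2, respTimeout=10, maxRetransmit=4,
    # )
    # response = transmitter.transmit()        # raises coapTimeout if nothing answers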
| 34.48062 | 176 | 0.519054 | import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapTransmitter')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import random
from . import coapDefines as d
from . import coapException as e
from . import coapUtils as u
from . import coapMessage as m
class coapTransmitter(threading.Thread):
STATE_INIT = 'INIT'
STATE_TXCON = 'TXCON'
STATE_TXNON = 'TXNON'
STATE_WAITFORACK = 'WAITFORACK'
STATE_ACKRX = 'ACKRX'
STATE_WAITFOREXPIRATIONMID = 'WAITFOREXPIRATIONMID'
STATE_WAITFORRESP = 'WAITFORRESP'
STATE_RESPRX = 'RESPRX'
STATE_TXACK = 'TXACK'
STATE_ALL = [
STATE_INIT,
STATE_TXCON,
STATE_TXNON,
STATE_WAITFORACK,
STATE_WAITFOREXPIRATIONMID,
STATE_WAITFORRESP,
STATE_TXACK,
]
def __init__(self,sendFunc,srcIp,srcPort,destIp,destPort,confirmable,messageId,code,token,options,payload,securityContext,requestSeq,ackTimeout,respTimeout,maxRetransmit):
log.debug('creating instance')
self.sendFunc = sendFunc
self.srcIp = srcIp
self.srcPort = srcPort
self.destIp = destIp
self.destPort = destPort
self.confirmable = confirmable
self.messageId = messageId
self.code = code
self.token = token
self.options = options
self.payload = payload
self.securityContext = securityContext
self.requestSeq = requestSeq
self.maxRetransmit = maxRetransmit
        self.dataLock = threading.Lock()
        self.fsmSem = threading.Lock()
        self.startLock = threading.Lock()
        self.endLock = threading.Lock()
        self.stateLock = threading.RLock()
        self.rxMsgEvent = threading.Event()
self.receivedACK = None
self.receivedResp = None
self.coapResponse = None
self.coapError = None
        self.state = self.STATE_INIT
        self.numTxCON = 0
self.ackTimeout = ackTimeout
self.respTimeout = respTimeout
self.fsmGoOn = True
self.fsmAction = {
self.STATE_INIT: self._action_INIT,
self.STATE_TXCON: self._action_TXCON,
self.STATE_TXNON: self._action_TXNON,
self.STATE_WAITFORACK: self._action_WAITFORACK,
self.STATE_ACKRX: self._action_ACKRX,
self.STATE_WAITFOREXPIRATIONMID: self._action_WAITFOREXPIRATIONMID,
self.STATE_WAITFORRESP: self._action_WAITFORRESP,
self.STATE_RESPRX: self._action_RESPRX,
self.STATE_TXACK: self._action_TXACK,
}
threading.Thread.__init__(self)
self.name = '[{0}]:{1}--m0x{2:x},0x{3:x}-->[{4}]:{5}'.format(
self.srcIp,
self.srcPort,
self.messageId,
self.token,
self.destIp,
self.destPort,
)
self.startLock.acquire()
self.endLock.acquire()
# start the thread's execution
self.start()
def transmit(self):
log.debug('transmit()')
self.startLock.release()
# wait for it to be done
self.endLock.acquire()
# raise an exception if went wrong, or return response
with self.dataLock:
if self.coapError:
assert not self.coapResponse
raise self.coapError #pylint: disable=E0702
if self.coapResponse:
assert not self.coapError
return self.coapResponse
raise SystemError('neither an error, nor a response')
def getState(self):
with self.stateLock:
return self.state
def receiveMessage(self, timestamp, srcIp, srcPort, message):
assert srcIp==self.destIp
assert srcPort==self.destPort
assert (message['token']==self.token) or (message['messageId']==self.messageId)
# log
log.debug('receiveMessage timestamp={0} srcIp={1} srcPort={2} message={3}'.format(timestamp,srcIp,srcPort,message))
# turn message into exception if needed
if message['code'] not in d.METHOD_ALL+d.COAP_RC_ALL_SUCCESS:
message = e.coapRcFactory(message['code'])
# store packet
with self.dataLock:
self.LastRxPacket = (timestamp,srcIp,srcPort,message)
# signal reception
self.rxMsgEvent.set()
#======================= private ==========================================
#===== fsm
def run(self):
try:
# wait for transmit() to be called
self.startLock.acquire()
# log
log.debug('start FSM')
while self.fsmGoOn:
# wait for the FSM to be kicked
self.fsmSem.acquire()
# log
log.debug('fsm state iteration: {0}'.format(self.getState()))
# call the appropriate action
self.fsmAction[self.getState()]()
# is interaction done?
with self.dataLock:
if self.coapError or self.coapResponse:
self.endLock.release()
self.fsmGoOn=False
except Exception as err:
log.critical(u.formatCrashMessage(
threadName = self.name,
error = err
)
)
def _action_INIT(self):
# log
log.debug('_action_INIT()')
# set state according to confirmable
if self.confirmable:
self._setState(self.STATE_TXCON)
else:
self._setState(self.STATE_TXNON)
# kick FSM
self._kickFsm()
def _action_TXCON(self):
# log
log.debug('_action_TXCON()')
# flag error if max number of CON transmits reached
if self.numTxCON>self.maxRetransmit+1:
# this is an error case
self.coapError = e.coapTimeout('No ACK received after {0} tries (max {1})'.format(
self.numTxCON,
self.maxRetransmit+1,
)
)
return
# build message
message = m.buildMessage(
msgtype = d.TYPE_CON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# increment number of transmitted messages
self.numTxCON += 1
# update FSM state
self._setState(self.STATE_WAITFORACK)
# kick FSM
self._kickFsm()
def _action_TXNON(self):
# log
log.debug('_action_TXNON()')
# build message
message = m.buildMessage(
msgtype = d.TYPE_NON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
def _action_WAITFORACK(self):
# log
log.debug('_action_WAITFORACK()')
startTime = time.time()
ackMaxWait = self.ackTimeout*random.uniform(1, d.DFLT_ACK_RANDOM_FACTOR)
while True:
waitTimeLeft = startTime+ackMaxWait-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
message['type']==d.TYPE_ACK and
message['messageId']==self.messageId
):
# store ACK
with self.dataLock:
self.receivedACK = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_ACKRX)
# kick FSM
self._kickFsm()
return
else:
# re-send
# update FSM state
self._setState(self.STATE_TXCON)
# kick FSM
self._kickFsm()
return
def _action_ACKRX(self):
# log
log.debug('_action_ACKRX()')
with self.dataLock:
assert self.receivedACK
(timestamp,srcIp,srcPort,message) = self.receivedACK
if message['code']==d.COAP_RC_NONE:
# response NOT piggybacked
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
else:
# piggybacked response
# successful end of FSM
with self.dataLock:
self.coapResponse = message
def _action_WAITFOREXPIRATIONMID(self):
# log
log.debug('_action_WAITFOREXPIRATIONMID()')
raise NotImplementedError()
def _action_WAITFORRESP(self):
# log
log.debug('_action_WAITFORRESP()')
startTime = time.time()
while True:
waitTimeLeft = startTime+self.respTimeout-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
(
message['type']==d.TYPE_CON or
message['type']==d.TYPE_NON
) and
message['token']==self.token
):
# store response
with self.dataLock:
self.receivedResp = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_RESPRX)
# kick FSM
self._kickFsm()
return
else:
# this is an error case
self.coapError = e.coapTimeout('No Response received after {0}s'.format(
self.respTimeout,
)
)
return
def _action_RESPRX(self):
# log
log.debug('_action_RESPRX()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# decide whether to ACK response
if message['type']==d.TYPE_CON:
self._setState(self.STATE_TXACK)
elif message['type']==d.TYPE_NON:
# successful end of FSM
with self.dataLock:
self.coapResponse = message
else:
raise SystemError('unexpected message type {0}'.format(message['type']))
# kick FSM
self._kickFsm()
def _action_TXACK(self):
# log
log.debug('_action_TXACK()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# build ACK
message = m.buildMessage(
msgtype = d.TYPE_ACK,
token = None,
code = d.COAP_RC_NONE,
messageId = message['messageId'],
)
# send
self.sendFunc(
destIp = message['srcId'], #pylint: disable=E1126
destPort = message['srcPort'], #pylint: disable=E1126
msg = message,
)
# successful end of FSM
with self.dataLock:
self.coapResponse = message
# kick FSM
self._kickFsm()
#===== helpers
def _kickFsm(self):
self.fsmSem.release()
def _setState(self,newState):
with self.stateLock:
self.state = newState
log.debug('{0}: state={1}'.format(self.name,newState))
| true | true |
1c49d5f237a286b2bb5c13f14cb28491327c2343 | 3,576 | py | Python | funboost/factories/publisher_factotry.py | DJMIN/funboost | 7570ca2909bb0b44a1080f5f98aa96c86d3da9d4 | [
"Apache-2.0"
] | null | null | null | funboost/factories/publisher_factotry.py | DJMIN/funboost | 7570ca2909bb0b44a1080f5f98aa96c86d3da9d4 | [
"Apache-2.0"
] | null | null | null | funboost/factories/publisher_factotry.py | DJMIN/funboost | 7570ca2909bb0b44a1080f5f98aa96c86d3da9d4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:16
import copy
from typing import Callable
from funboost.publishers.confluent_kafka_publisher import ConfluentKafkaPublisher
from funboost.publishers.http_publisher import HTTPPublisher
from funboost.publishers.kombu_publisher import KombuPublisher
from funboost.publishers.nats_publisher import NatsPublisher
from funboost.publishers.redis_publisher_lpush import RedisPublisherLpush
from funboost.publishers.tcp_publisher import TCPPublisher
from funboost.publishers.txt_file_publisher import TxtFilePublisher
from funboost.publishers.udp_publisher import UDPPublisher
from funboost.publishers.zeromq_publisher import ZeroMqPublisher
from funboost.publishers.kafka_publisher import KafkaPublisher
from funboost.publishers.local_python_queue_publisher import LocalPythonQueuePublisher
from funboost.publishers.mongomq_publisher import MongoMqPublisher
from funboost.publishers.nsq_publisher import NsqPublisher
from funboost.publishers.persist_queue_publisher import PersistQueuePublisher
from funboost.publishers.rabbitmq_amqpstorm_publisher import RabbitmqPublisherUsingAmqpStorm
from funboost.publishers.rabbitmq_pika_publisher import RabbitmqPublisher
from funboost.publishers.rabbitmq_rabbitpy_publisher import RabbitmqPublisherUsingRabbitpy
from funboost.publishers.redis_publisher import RedisPublisher
from funboost.publishers.rocketmq_publisher import RocketmqPublisher
from funboost.publishers.sqla_queue_publisher import SqlachemyQueuePublisher
from funboost.publishers.redis_stream_publisher import RedisStreamPublisher
from funboost.publishers.mqtt_publisher import MqttPublisher
from funboost.publishers.httpsqs_publisher import HttpsqsPublisher
from funboost import funboost_config_deafult
def get_publisher(queue_name, *, log_level_int=10, logger_prefix='', is_add_file_handler=True,
clear_queue_within_init=False, is_add_publish_time=True, consuming_function: Callable = None,
broker_kind: int = None):
"""
:param queue_name:
:param log_level_int:
:param logger_prefix:
:param is_add_file_handler:
:param clear_queue_within_init:
    :param is_add_publish_time: whether to add the publish time to the message; this flag will be deprecated later and the time will always be added.
    :param consuming_function: the consuming function, used to validate the arguments of a published task against the function signature; if not passed, no validation of published tasks is performed.
           For example, if the add function accepts x and y, publishing {"x": 1, "z": 3} is incorrect because the function does not accept a z parameter.
    :param broker_kind: the kind of middleware or client package to use.
:return:
"""
all_kwargs = copy.deepcopy(locals())
all_kwargs.pop('broker_kind')
broker_kind__publisher_type_map = {
0: RabbitmqPublisherUsingAmqpStorm,
1: RabbitmqPublisherUsingRabbitpy,
2: RedisPublisher,
3: LocalPythonQueuePublisher,
4: RabbitmqPublisher,
5: MongoMqPublisher,
6: PersistQueuePublisher,
7: NsqPublisher,
8: KafkaPublisher,
9: RedisPublisher,
10: SqlachemyQueuePublisher,
11: RocketmqPublisher,
12: RedisStreamPublisher,
13: ZeroMqPublisher,
14: RedisPublisherLpush,
15: KombuPublisher,
16: ConfluentKafkaPublisher,
17: MqttPublisher,
18: HttpsqsPublisher,
21: UDPPublisher,
22: TCPPublisher,
23: HTTPPublisher,
24: NatsPublisher,
25: TxtFilePublisher,
}
if broker_kind is None:
broker_kind = funboost_config_deafult.DEFAULT_BROKER_KIND
if broker_kind not in broker_kind__publisher_type_map:
        raise ValueError(f'Invalid broker_kind number; the value you set is {broker_kind}')
return broker_kind__publisher_type_map[broker_kind](**all_kwargs)
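# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of calling get_publisher.  broker_kind=3 picks
# LocalPythonQueuePublisher from the mapping above, so no external middleware
# is needed; the queue name and message body are invented for illustration,
# and publish() is assumed to follow the common dict-in interface shared by
# the funboost publishers.
if __name__ == '__main__':
    publisher = get_publisher('test_queue_demo', log_level_int=20, broker_kind=3)
    publisher.publish({'x': 1, 'y': 2})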
| 43.609756 | 111 | 0.782438 | import copy
from typing import Callable
from funboost.publishers.confluent_kafka_publisher import ConfluentKafkaPublisher
from funboost.publishers.http_publisher import HTTPPublisher
from funboost.publishers.kombu_publisher import KombuPublisher
from funboost.publishers.nats_publisher import NatsPublisher
from funboost.publishers.redis_publisher_lpush import RedisPublisherLpush
from funboost.publishers.tcp_publisher import TCPPublisher
from funboost.publishers.txt_file_publisher import TxtFilePublisher
from funboost.publishers.udp_publisher import UDPPublisher
from funboost.publishers.zeromq_publisher import ZeroMqPublisher
from funboost.publishers.kafka_publisher import KafkaPublisher
from funboost.publishers.local_python_queue_publisher import LocalPythonQueuePublisher
from funboost.publishers.mongomq_publisher import MongoMqPublisher
from funboost.publishers.nsq_publisher import NsqPublisher
from funboost.publishers.persist_queue_publisher import PersistQueuePublisher
from funboost.publishers.rabbitmq_amqpstorm_publisher import RabbitmqPublisherUsingAmqpStorm
from funboost.publishers.rabbitmq_pika_publisher import RabbitmqPublisher
from funboost.publishers.rabbitmq_rabbitpy_publisher import RabbitmqPublisherUsingRabbitpy
from funboost.publishers.redis_publisher import RedisPublisher
from funboost.publishers.rocketmq_publisher import RocketmqPublisher
from funboost.publishers.sqla_queue_publisher import SqlachemyQueuePublisher
from funboost.publishers.redis_stream_publisher import RedisStreamPublisher
from funboost.publishers.mqtt_publisher import MqttPublisher
from funboost.publishers.httpsqs_publisher import HttpsqsPublisher
from funboost import funboost_config_deafult
def get_publisher(queue_name, *, log_level_int=10, logger_prefix='', is_add_file_handler=True,
clear_queue_within_init=False, is_add_publish_time=True, consuming_function: Callable = None,
broker_kind: int = None):
all_kwargs = copy.deepcopy(locals())
all_kwargs.pop('broker_kind')
broker_kind__publisher_type_map = {
0: RabbitmqPublisherUsingAmqpStorm,
1: RabbitmqPublisherUsingRabbitpy,
2: RedisPublisher,
3: LocalPythonQueuePublisher,
4: RabbitmqPublisher,
5: MongoMqPublisher,
6: PersistQueuePublisher,
7: NsqPublisher,
8: KafkaPublisher,
9: RedisPublisher,
10: SqlachemyQueuePublisher,
11: RocketmqPublisher,
12: RedisStreamPublisher,
13: ZeroMqPublisher,
14: RedisPublisherLpush,
15: KombuPublisher,
16: ConfluentKafkaPublisher,
17: MqttPublisher,
18: HttpsqsPublisher,
21: UDPPublisher,
22: TCPPublisher,
23: HTTPPublisher,
24: NatsPublisher,
25: TxtFilePublisher,
}
if broker_kind is None:
broker_kind = funboost_config_deafult.DEFAULT_BROKER_KIND
if broker_kind not in broker_kind__publisher_type_map:
        raise ValueError(f'Invalid broker_kind number; the value you set is {broker_kind}')
return broker_kind__publisher_type_map[broker_kind](**all_kwargs)
| true | true |
1c49d63275e5e8dd3611c5fba177eb143551df99 | 15,890 | py | Python | docs/source/conf.py | crusaderky/distributed | d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a | [
"BSD-3-Clause"
] | 1,358 | 2016-02-09T21:25:27.000Z | 2022-03-30T08:06:36.000Z | docs/source/conf.py | crusaderky/distributed | d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a | [
"BSD-3-Clause"
] | 4,789 | 2016-02-10T00:13:43.000Z | 2022-03-31T23:56:27.000Z | docs/source/conf.py | crusaderky/distributed | d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a | [
"BSD-3-Clause"
] | 791 | 2016-02-19T04:34:38.000Z | 2022-03-31T16:26:38.000Z | from __future__ import annotations
#
# Dask.distributed documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 6 14:42:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"numpydoc",
"sphinx_click.ext",
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Dask.distributed"
copyright = "2016, Anaconda, Inc."
author = "Anaconda, Inc."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import distributed
version = distributed.__version__
# The full version, including alpha/beta/rc tags.
release = distributed.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: list[str] = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = "dask_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: list[str] = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "distributeddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"distributed.tex",
"Dask.distributed Documentation",
"Matthew Rocklin",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "Dask.distributed", "Dask.distributed Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Dask.distributed",
"Dask.distributed Documentation",
author,
"Dask.distributed",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Link to GitHub issues and pull requests using :pr:`1234` and :issue:`1234`
# syntax
extlinks = {
"issue": ("https://github.com/dask/distributed/issues/%s", "GH#"),
"pr": ("https://github.com/dask/distributed/pull/%s", "GH#"),
}
# Configuration for intersphinx: refer to the Python standard library
# and the Numpy documentation.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"pandas": ("https://pandas.pydata.org/docs", None),
"dask": ("https://docs.dask.org/en/latest", None),
"bokeh": ("https://docs.bokeh.org/en/latest", None),
}
# Redirects
# https://tech.signavio.com/2017/managing-sphinx-redirects
redirect_files = [
# old html, new html
("joblib.html", "https://ml.dask.org/joblib.html"),
("setup.html", "https://docs.dask.org/en/latest/setup.html"),
("ec2.html", "https://docs.dask.org/en/latest/setup/cloud.html"),
("configuration.html", "https://docs.dask.org/en/latest/configuration.html"),
(
"local-cluster.html",
"https://docs.dask.org/en/latest/setup/single-distributed.html",
),
("adaptive.html", "https://docs.dask.org/en/latest/setup/adaptive.html"),
("prometheus.html", "https://docs.dask.org/en/latest/setup/prometheus.html"),
("web.html", "https://docs.dask.org/en/latest/diagnostics-distributed.html"),
]
redirect_template = """\
<html>
<head>
<meta http-equiv="refresh" content="1; url={new}" />
<script>
window.location.href = "{new}"
</script>
</head>
</html>
"""
def copy_legacy_redirects(app, docname):
if app.builder.name == "html":
for html_src_path, new in redirect_files:
page = redirect_template.format(new=new)
target_path = app.outdir + "/" + html_src_path
with open(target_path, "w") as f:
f.write(page)
from docutils.parsers.rst import directives # type: ignore
# -- Configuration to keep autosummary in sync with autoclass::members ----------------------------------------------
# Fixes issues/3693
# See https://stackoverflow.com/questions/20569011/python-sphinx-autosummary-automated-listing-of-member-functions
from sphinx.ext.autosummary import Autosummary, get_documenter
from sphinx.util.inspect import safe_getattr
class AutoAutoSummary(Autosummary):
"""Create a summary for methods and attributes (autosummary).
See https://stackoverflow.com/questions/20569011/python-sphinx-autosummary-automated-listing-of-member-functions
"""
option_spec = {
"methods": directives.unchanged,
"attributes": directives.unchanged,
}
required_arguments = 1
@staticmethod
def get_members(app, obj, typ, include_public=None):
if not include_public:
include_public = []
items = []
for name in sorted(obj.__dict__.keys()):
try:
documenter = get_documenter(app, safe_getattr(obj, name), obj)
except AttributeError:
continue
if documenter.objtype in typ:
items.append(name)
public = [x for x in items if x in include_public or not x.startswith("_")]
return public, items
def run(self):
clazz = str(self.arguments[0])
(module_name, class_name) = clazz.rsplit(".", 1)
m = __import__(module_name, globals(), locals(), [class_name])
c = getattr(m, class_name)
app = self.state.document.settings.env.app
if "methods" in self.options:
_, methods = self.get_members(app, c, ["method"], ["__init__"])
self.content = [
f"{class_name}.{method}"
for method in methods
if not method.startswith("_")
]
if "attributes" in self.options:
_, attribs = self.get_members(app, c, ["attribute", "property"])
self.content = [
f"~{clazz}.{attrib}" for attrib in attribs if not attrib.startswith("_")
]
return super().run()
def setup(app):
app.add_directive("autoautosummary", AutoAutoSummary)
app.connect("build-finished", copy_legacy_redirects)
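# Illustrative usage sketch for the directive registered in setup() above: it can
# be called from the .rst sources to generate member summaries without listing
# them by hand. The class path below is only an example, not a path taken from
# this configuration:
#
#   .. autoautosummary:: distributed.client.Client
#      :methods:
#
#   .. autoautosummary:: distributed.client.Client
#      :attributes: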
1c49d6af950d52fdab2916b0e3bb2e50b50c6d22 | 6,893 | py | Python | python/paddle/incubate/operators/graph_reindex.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/incubate/operators/graph_reindex.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/incubate/operators/graph_reindex.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid import core
from paddle import _C_ops
def graph_reindex(x,
neighbors,
count,
value_buffer=None,
index_buffer=None,
flag_buffer_hashtable=False,
name=None):
"""
Graph Reindex API.
This API is mainly used in Graph Learning domain, which should be used
in conjunction with `graph_sample_neighbors` API. And the main purpose
is to reindex the ids information of the input nodes, and return the
corresponding graph edges after reindex.
**Notes**:
The number in x should be unique, otherwise it would cause potential errors.
Besides, we also support multi-edge-types neighbors reindexing. If we have different
edge_type neighbors for x, we should concatenate all the neighbors and count of x.
We will reindex all the nodes from 0.
Take input nodes x = [0, 1, 2] as an example.
If we have neighbors = [8, 9, 0, 4, 7, 6, 7], and count = [2, 3, 2],
then we know that the neighbors of 0 is [8, 9], the neighbors of 1
is [0, 4, 7], and the neighbors of 2 is [6, 7].
Args:
x (Tensor): The input nodes which we sample neighbors for. The available
data type is int32, int64.
neighbors (Tensor): The neighbors of the input nodes `x`. The data type
should be the same with `x`.
count (Tensor): The neighbor count of the input nodes `x`. And the
data type should be int32.
value_buffer (Tensor|None): Value buffer for hashtable. The data type should
be int32, and should be filled with -1.
index_buffer (Tensor|None): Index buffer for hashtable. The data type should
be int32, and should be filled with -1.
flag_buffer_hashtable (bool): Whether to use buffer for hashtable to speed up.
Default is False. Only useful for gpu version currently.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
reindex_src (Tensor): The source node index of graph edges after reindex.
reindex_dst (Tensor): The destination node index of graph edges after reindex.
out_nodes (Tensor): The index of unique input nodes and neighbors before reindex,
where we put the input nodes `x` in the front, and put neighbor
nodes in the back.
Examples:
.. code-block:: python
import paddle
x = [0, 1, 2]
neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
count_e1 = [2, 3, 2]
x = paddle.to_tensor(x, dtype="int64")
neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
count_e1 = paddle.to_tensor(count_e1, dtype="int32")
reindex_src, reindex_dst, out_nodes = \
paddle.incubate.graph_reindex(x, neighbors_e1, count_e1)
# reindex_src: [3, 4, 0, 5, 6, 7, 6]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6]
neighbors_e2 = [0, 2, 3, 5, 1]
count_e2 = [1, 3, 1]
neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
count_e2 = paddle.to_tensor(count_e2, dtype="int32")
neighbors = paddle.concat([neighbors_e1, neighbors_e2])
count = paddle.concat([count_e1, count_e2])
reindex_src, reindex_dst, out_nodes = \
paddle.incubate.graph_reindex(x, neighbors, count)
# reindex_src: [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5]
"""
if flag_buffer_hashtable:
if value_buffer is None or index_buffer is None:
            raise ValueError("`value_buffer` and `index_buffer` should not "
                             "be None if `flag_buffer_hashtable` is True.")
if _non_static_mode():
reindex_src, reindex_dst, out_nodes = \
_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer,
"flag_buffer_hashtable", flag_buffer_hashtable)
return reindex_src, reindex_dst, out_nodes
check_variable_and_dtype(x, "X", ("int32", "int64"), "graph_reindex")
check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"),
"graph_reindex")
check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex")
if flag_buffer_hashtable:
check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
check_variable_and_dtype(index_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
helper = LayerHelper("graph_reindex", **locals())
reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype)
reindex_dst = helper.create_variable_for_type_inference(dtype=x.dtype)
out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="graph_reindex",
inputs={
"X":
x,
"Neighbors":
neighbors,
"Count":
count,
"HashTable_Value":
value_buffer if flag_buffer_hashtable else None,
"HashTable_Index":
index_buffer if flag_buffer_hashtable else None,
},
outputs={
"Reindex_Src": reindex_src,
"Reindex_Dst": reindex_dst,
"Out_Nodes": out_nodes
},
attrs={"flag_buffer_hashtable": flag_buffer_hashtable})
return reindex_src, reindex_dst, out_nodes
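# Usage sketch for the buffered-hashtable path described in the docstring above.
# `num_buffer_slots` is an assumed, illustrative size; the documented
# requirements are only int32 dtype, a fill value of -1, and
# `flag_buffer_hashtable=True`:
#
#   num_buffer_slots = 2 ** 16
#   value_buffer = paddle.full([num_buffer_slots], -1, dtype="int32")
#   index_buffer = paddle.full([num_buffer_slots], -1, dtype="int32")
#   reindex_src, reindex_dst, out_nodes = graph_reindex(
#       x, neighbors, count, value_buffer, index_buffer,
#       flag_buffer_hashtable=True)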
1c49d6baa6cc4f1e16d6a5132cb65e2120088470 | 1,158 | py | Python | Advent of Code/2020/3/3.py | dimitrov-dimitar/competitive-programming | f2b022377baf6d4beff213fc513907b774c12352 | [
"MIT"
] | null | null | null | Advent of Code/2020/3/3.py | dimitrov-dimitar/competitive-programming | f2b022377baf6d4beff213fc513907b774c12352 | [
"MIT"
] | null | null | null | Advent of Code/2020/3/3.py | dimitrov-dimitar/competitive-programming | f2b022377baf6d4beff213fc513907b774c12352 | [
"MIT"
] | null | null | null | matrix = []
with open('input') as f:
for row in f:
row_matrix = [x for x in row if x != '\n']
        # repeat the row pattern so the fixed-step walks below never run past
        # the right edge (the real grid repeats infinitely to the right)
        row_matrix *= 1000
matrix.append(row_matrix)
# print(matrix[0])
i, j = 0, 0
counter_1 = counter_2 = counter_3 = counter_4 = counter_5 = 0
# Part One
# 3, 1
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_1 += 1
i += 1
j += 3
print(counter_1)
# Part Two
# 1, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_2 += 1
i += 1
j += 1
print(counter_2)
# 5, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_3 += 1
i += 1
j += 5
print(counter_3)
# 7, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_4 += 1
i += 1
j += 7
print(counter_4)
# 1, 2
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_5 += 1
i += 2
j += 1
print(counter_5)
print(counter_1 * counter_2 * counter_3 * counter_4 * counter_5)
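# The five walks above differ only in their step sizes. A more compact
# alternative (a sketch that relies on the same pre-widened `matrix` built
# above) wraps the walk in one helper:
def count_trees(right, down):
    # Start at the top-left corner, move `right` columns and `down` rows per
    # step, and count the '#' cells hit before falling off the bottom edge.
    trees = row = col = 0
    while row < len(matrix):
        if matrix[row][col] == '#':
            trees += 1
        row += down
        col += right
    return trees

# count_trees(3, 1) reproduces counter_1, and the puzzle answer is the product
# count_trees(1, 1) * count_trees(3, 1) * count_trees(5, 1) * count_trees(7, 1) * count_trees(1, 2)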
1c49d6e03e4aa9f496a950a08e7afb8664ce56e7 | 4,752 | py | Python | Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py | tejasurya/Text_Classification_using_Neural_Networks | d4852780e6c86843aee768d306d19428c8cb9c7f | [
"MIT"
] | 1 | 2020-04-30T16:15:42.000Z | 2020-04-30T16:15:42.000Z | Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py | tejasurya/Text_Classification_using_Neural_Networks | d4852780e6c86843aee768d306d19428c8cb9c7f | [
"MIT"
] | null | null | null | Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py | tejasurya/Text_Classification_using_Neural_Networks | d4852780e6c86843aee768d306d19428c8cb9c7f | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 15:55:01 2018
@author: HP
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 14:31:43 2018
@author: HP
"""
import os
import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors as KV
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional
from keras.layers import Embedding
from collections import defaultdict
from keras.layers import Conv1D, MaxPooling1D
import random
from sklearn.datasets import fetch_20newsgroups
batch_size=32
embedding_size=128
nclass=20
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
# GRU
gru_output_size = 70
#LSTM
lstm_output_size = 70
trim_len=200
sample_cnt=500
trainer = fetch_20newsgroups(subset='train')
tester = fetch_20newsgroups(subset='test')
#input - output
train_ip=trainer.data
train_op=list(trainer.target)
test_ip=tester.data
test_op=list(tester.target)
ip=train_ip+test_ip
op=train_op+test_op
ip=ip[0:sample_cnt]
for ty in range(len(ip)):
ip[ty]=ip[ty][0:trim_len]
op=op[0:sample_cnt]
len_finder=[]
for dat in ip:
len_finder.append(len(dat))
#Splitting train and test
input_train=[]
input_test=[]
input_valid=[]
j=0;
for zz in ip:
j=j+1
    if (j%5 == 0):
        input_test.append(zz)
    elif(j%5 == 1):
        input_valid.append(zz)
else:
input_train.append(zz)
label_train=[]
label_test=[]
label_valid=[]
j=0;
for zz in op:
j=j+1
    if (j%5 == 0):
        label_test.append(zz)
    elif(j%5 == 1):
        label_valid.append(zz)
else:
label_train.append(zz)
#one hot encoding
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
i=0
y_valid=np.zeros((len(label_valid),max(label_valid)+1))
for x in label_valid:
y_valid[i][x]=1
i=i+1
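# NOTE: the three loops above build the one-hot targets by hand. A shorter
# equivalent (a sketch, assuming the stock Keras helper is available) would be:
#   from keras.utils import to_categorical
#   y_train = to_categorical(label_train, num_classes=nclass)
#   y_test = to_categorical(label_test, num_classes=nclass)
#   y_valid = to_categorical(label_valid, num_classes=nclass)
# Passing num_classes=nclass also keeps the target width equal to the 20-way
# softmax layer even if a class is missing from this small sample.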
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# pad documents to the length of the longest document
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
#Validating the model
# NOTE: reuse the tokenizer fitted on the training texts so that word indices
# stay consistent with the embedding matrix built above
# integer encode the documents
vencoded_docs = t.texts_to_sequences(input_valid)
# pad documents to the length of the longest document
vpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')
#Testing the model
# integer encode the documents with the same tokenizer
tencoded_docs = t.texts_to_sequences(input_test)
# pad documents to the length of the longest document
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
# define model
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Flatten())
model.add(Dense(nclass, activation='softmax'))
# compile the model (categorical cross-entropy matches the 20-way softmax output)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,y_train, epochs=1, verbose=0, validation_data=(vpadded_docs, y_valid))
# evaluate the model
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
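# Short usage sketch of the fitted model: predict class probabilities for the
# padded test documents and recover label indices with argmax; only objects
# already defined above are used.
predictions = model.predict(tpadded_docs)
predicted_labels = np.argmax(predictions, axis=1)
print('First predicted labels:', predicted_labels[:10])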
1c49d71f05cafc9fa6e61f1005654be4254a0bd8 | 1,339 | py | Python | transcribe/scribe_modules/yum_repos.py | aakarshg/scribe | 0ae48546f9d461f9421305d0902ed73b81c0f112 | [
"Apache-2.0"
] | null | null | null | transcribe/scribe_modules/yum_repos.py | aakarshg/scribe | 0ae48546f9d461f9421305d0902ed73b81c0f112 | [
"Apache-2.0"
] | null | null | null | transcribe/scribe_modules/yum_repos.py | aakarshg/scribe | 0ae48546f9d461f9421305d0902ed73b81c0f112 | [
"Apache-2.0"
] | null | null | null |
from . import ScribeModuleBaseClass
from . lib.util import format_url
base_url = "http://mirror.centos.org/centos/$releasever/{}/$basearch/"
# object_dict = {}
class Yum_repos(ScribeModuleBaseClass):
def __init__(self, input_dict=None, module_name=None, host_name=None,
input_type=None, scribe_uuid=None):
ScribeModuleBaseClass.__init__(self, module_name=module_name,
input_dict=input_dict,
host_name=host_name,
input_type=input_type,
scribe_uuid=scribe_uuid)
if input_dict:
# object_dict['repo_name'] = input_dict['repoid']
# object_dict['repo_state'] = self.update_repo_state(input_dict)
# object_dict['base_url'] = format_url(base_url, self.repo_name)
self.repo_name = input_dict['repoid']
self.repo_state = self.update_repo_state(input_dict)
            # Build the default CentOS mirror base URL for this repo id
self.base_url = format_url(base_url, self.repo_name)
def update_repo_state(self, value):
if value['state'] == 'enabled':
return 1
return 0
def __iter__(self):
for attr, value in self.__dict__.items():
yield attr, value
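# Hypothetical usage sketch: the dict below is an assumed shape based only on
# the keys this class reads ('repoid' and 'state'), and the remaining
# constructor arguments are placeholders. Because of __iter__ above, an
# instance converts straight to a dict:
#
#   repo = Yum_repos(input_dict={'repoid': 'base', 'state': 'enabled'},
#                    module_name='yum_repos', host_name='node-1',
#                    input_type='dict', scribe_uuid='0000')
#   record = dict(repo)   # contains repo_name, repo_state (1) and base_url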
1c49d7dd71ba7d729f7fdaf9ace0f3e50bc1f6c4 | 4,730 | py | Python | src/v5.3/resources/swagger_client/models/tpdm_credential_student_academic_record.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 2 | 2021-04-27T17:18:17.000Z | 2021-04-27T19:14:39.000Z | src/v5.1/resources/swagger_client/models/tpdm_credential_student_academic_record.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | null | null | null | src/v5.1/resources/swagger_client/models/tpdm_credential_student_academic_record.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 1 | 2022-01-06T09:43:11.000Z | 2022-01-06T09:43:11.000Z |
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmCredentialStudentAcademicRecord(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'student_academic_record_reference': 'EdFiStudentAcademicRecordReference'
}
attribute_map = {
'student_academic_record_reference': 'studentAcademicRecordReference'
}
def __init__(self, student_academic_record_reference=None, _configuration=None): # noqa: E501
"""TpdmCredentialStudentAcademicRecord - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._student_academic_record_reference = None
self.discriminator = None
self.student_academic_record_reference = student_academic_record_reference
@property
def student_academic_record_reference(self):
"""Gets the student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. # noqa: E501
:return: The student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. # noqa: E501
:rtype: EdFiStudentAcademicRecordReference
"""
return self._student_academic_record_reference
@student_academic_record_reference.setter
def student_academic_record_reference(self, student_academic_record_reference):
"""Sets the student_academic_record_reference of this TpdmCredentialStudentAcademicRecord.
:param student_academic_record_reference: The student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. # noqa: E501
:type: EdFiStudentAcademicRecordReference
"""
if self._configuration.client_side_validation and student_academic_record_reference is None:
raise ValueError("Invalid value for `student_academic_record_reference`, must not be `None`") # noqa: E501
self._student_academic_record_reference = student_academic_record_reference
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmCredentialStudentAcademicRecord, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpdmCredentialStudentAcademicRecord):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TpdmCredentialStudentAcademicRecord):
return True
return self.to_dict() != other.to_dict()
1c49d846d74671fb13187159a9661e0807baa518 | 4,859 | py | Python | SecML/src/secml/ml/kernels/c_kernel_poly.py | dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness | 77698340906fd0ec68d857315283d849e236ebd7 | [
"MIT"
] | 5 | 2020-07-09T13:03:34.000Z | 2021-02-16T17:15:26.000Z | SecML/src/secml/ml/kernels/c_kernel_poly.py | dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness | 77698340906fd0ec68d857315283d849e236ebd7 | [
"MIT"
] | 1 | 2021-12-30T21:11:50.000Z | 2021-12-30T21:11:50.000Z | SecML/src/secml/ml/kernels/c_kernel_poly.py | dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness | 77698340906fd0ec68d857315283d849e236ebd7 | [
"MIT"
] | 2 | 2021-03-22T19:22:56.000Z | 2021-09-19T20:07:10.000Z |
"""
.. module:: CKernelPoly
:synopsis: Polynomial kernel
.. moduleauthor:: Battista Biggio <[email protected]>
.. moduleauthor:: Marco Melis <[email protected]>
"""
from sklearn import metrics
from secml.array import CArray
from secml.ml.kernels import CKernel
class CKernelPoly(CKernel):
"""Polynomial kernel.
Given matrices X and RV, this is computed by::
K(x, rv) = (coef0 + gamma * <x, rv>)^degree
for each pair of rows in X and in RV.
Parameters
----------
degree : int, optional
Kernel degree. Default 2.
gamma : float, optional
Free parameter to be used for balancing. Default 1.0.
coef0 : float, optional
Free parameter used for trading off the influence of higher-order
versus lower-order terms in the kernel. Default 1.0.
Attributes
----------
class_type : 'poly'
Examples
--------
>>> from secml.array import CArray
>>> from secml.ml.kernels.c_kernel_poly import CKernelPoly
>>> print(CKernelPoly(degree=3, gamma=0.001, coef0=2).k(CArray([[1,2],[3,4]]), CArray([[10,20],[30,40]])))
CArray([[ 8.615125 9.393931]
[ 9.393931 11.390625]])
>>> print(CKernelPoly().k(CArray([[1,2],[3,4]])))
CArray([[ 36. 144.]
[144. 676.]])
"""
__class_type = 'poly'
def __init__(self, degree=2, gamma=1.0, coef0=1.0):
# kernel parameters
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
super(CKernelPoly, self).__init__()
@property
def degree(self):
"""Degree parameter."""
return self._degree
@degree.setter
def degree(self, degree):
"""Sets degree parameter.
Parameters
----------
degree : int
Default is 2. Integer degree of the kernel.
"""
self._degree = int(degree)
@property
def gamma(self):
"""Gamma parameter."""
return self._gamma
@gamma.setter
def gamma(self, gamma):
"""Sets gamma parameter.
Parameters
----------
gamma : float
Default is 1.0. This is a free parameter to be used for balancing.
"""
self._gamma = float(gamma)
@property
def coef0(self):
"""Coef0 parameter."""
return self._coef0
@coef0.setter
def coef0(self, coef0):
"""Sets coef0 parameter.
Parameters
----------
coef0 : float
Default is 1.0. Free parameter used for trading off the influence
of higher-order versus lower-order terms in the kernel.
"""
self._coef0 = float(coef0)
def _forward(self, x):
"""Compute the polynomial kernel between x and cached rv.
Parameters
----------
x : CArray or array_like
Array of shape (n_x, n_features).
Returns
-------
kernel : CArray
Kernel between x and rv. Array of shape (n_x, n_rv).
"""
return CArray(metrics.pairwise.polynomial_kernel(
CArray(x).get_data(), CArray(self._rv).get_data(),
self.degree, self.gamma, self.coef0))
# TODO: check for high gamma,
# we may have uncontrolled behavior (too high values)
def _backward(self, w=None):
"""Calculate Polynomial kernel gradient wrt cached vector 'x'.
The gradient of Polynomial kernel is given by::
            dK(rv,x)/dx = rv * gamma * degree * k(rv,x, degree-1)
Parameters
----------
w : CArray of shape (1, n_rv) or None
if CArray, it is pre-multiplied to the gradient
of the module, as in standard reverse-mode autodiff.
Returns
-------
kernel_gradient : CArray
Kernel gradient of rv with respect to vector x,
shape (n_rv, n_features) if n_rv > 1 and w is None,
else (1, n_features).
"""
# Checking if cached x is a vector
if not self._cached_x.is_vector_like:
raise ValueError(
"kernel gradient can be computed only wrt vector-like arrays.")
if self._rv is None:
raise ValueError("Please run forward with caching=True or set"
"`rv` first.")
k = CArray(metrics.pairwise.polynomial_kernel(
self._rv.get_data(), self._cached_x.get_data(),
self.degree - 1, self.gamma, self.coef0))
# Format of output array should be the same as cached x
if self._cached_x.issparse:
rv = self._rv.tosparse()
# Casting the kernel to sparse for efficient broadcasting
k = k.tosparse()
else:
rv = self._rv.todense()
grad = rv * k * self.gamma * self.degree
return grad if w is None else w.dot(grad)
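# Numerical sanity check of the formula documented above,
# K(x, rv) = (coef0 + gamma * <x, rv>)^degree, sketched against plain numpy and
# using only the public `k` method shown in the class docstring:
#
#   import numpy as np
#   x = CArray([[1., 2.], [3., 4.]])
#   rv = CArray([[10., 20.], [30., 40.]])
#   kern = CKernelPoly(degree=2, gamma=0.5, coef0=1.0)
#   expected = (1.0 + 0.5 * x.tondarray().dot(rv.tondarray().T)) ** 2
#   assert np.allclose(kern.k(x, rv).tondarray(), expected)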
1c49d916365ec2c44186d26762ebf015bff76d09 | 246 | py | Python | setup.py | dartmouthrobotics/gds_tools | 35b26b32b0d59fccf08050014bd60fd8b97fd5aa | [
"MIT"
] | null | null | null | setup.py | dartmouthrobotics/gds_tools | 35b26b32b0d59fccf08050014bd60fd8b97fd5aa | [
"MIT"
] | null | null | null | setup.py | dartmouthrobotics/gds_tools | 35b26b32b0d59fccf08050014bd60fd8b97fd5aa | [
"MIT"
] | null | null | null |
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['gds_tools'],
package_dir={'': 'src'},
)
setup(**setup_args)
1c49da5e3caa5cc5693ed524f38852b909517aba | 19,014 | py | Python | nova/network/security_group/quantum_driver.py | bopopescu/Nova-31 | cabc3f7a905ea982cf9d2832a3990ae8e061d963 | [
"Apache-2.0"
] | 1 | 2021-04-08T10:13:03.000Z | 2021-04-08T10:13:03.000Z | nova/network/security_group/quantum_driver.py | bopopescu/Nova-31 | cabc3f7a905ea982cf9d2832a3990ae8e061d963 | [
"Apache-2.0"
] | null | null | null | nova/network/security_group/quantum_driver.py | bopopescu/Nova-31 | cabc3f7a905ea982cf9d2832a3990ae8e061d963 | [
"Apache-2.0"
] | 1 | 2020-07-24T08:19:18.000Z | 2020-07-24T08:19:18.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
from oslo.config import cfg
from quantumclient.common import exceptions as q_exc
from quantumclient.quantum import v2_0 as quantumv20
from webob import exc
from nova.compute import api as compute_api
from nova import exception
from nova.network import quantumv2
from nova.network.security_group import security_group_base
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
wrap_check_security_groups_policy = compute_api.policy_decorator(
scope='compute:security_groups')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_dict(name, description)
try:
security_group = quantum.create_security_group(
body).get('security_group')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error creating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from quantum here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
raise e
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if rule['port_range_min'] is None:
nova_rule['from_port'] = -1
else:
nova_rule['from_port'] = rule['port_range_min']
if rule['port_range_max'] is None:
nova_rule['to_port'] = -1
else:
nova_rule['to_port'] = rule['port_range_max']
nova_rule['group_id'] = rule['remote_group_id']
nova_rule['cidr'] = rule['remote_ip_prefix']
return nova_rule
def get(self, context, name=None, id=None, map_exception=False):
quantum = quantumv2.get_client(context)
try:
if not id and name:
id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', name)
group = quantum.show_security_group(id).get('security_group')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.debug(_("Quantum security group %s not found"), name)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_format(group)
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
"""Returns list of security group rules owned by tenant."""
quantum = quantumv2.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
if project:
search_opts['tenant_id'] = project
try:
security_groups = quantum.list_security_groups(**search_opts).get(
'security_groups')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error getting security groups"))
raise e
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
"""This function deletes a security group."""
quantum = quantumv2.get_client(context)
try:
quantum.delete_security_group(security_group['id'])
except q_exc.QuantumClientException as e:
if e.status_code == 404:
self.raise_not_found(e.message)
elif e.status_code == 409:
self.raise_invalid_property(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
        Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
        this function is written to support both. Multiple rules are
installed to a security group in quantum using bulk support."""
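        # Illustrative sketch, not part of the original source: a vals entry such as
        #   {'protocol': 'tcp', 'from_port': 22, 'to_port': 22, 'parent_group_id': sg_id}
        # is turned by _make_quantum_security_group_rules_list() below into one element of
        #   {'security_group_rules': [{'direction': 'ingress', 'ethertype': 'IPv4',
        #                              'protocol': 'tcp', 'port_range_min': 22,
        #                              'port_range_max': 22, ...}]}
        # which quantum accepts as a single bulk create request.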
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_rules_list(vals)
try:
rules = quantum.create_security_group_rule(
body).get('security_group_rules')
except q_exc.QuantumClientException as e:
if e.status_code == 409:
LOG.exception(_("Quantum Error getting security group %s"),
name)
self.raise_not_found(e.message)
else:
LOG.exception(_("Quantum Error:"))
raise e
converted_rules = []
for rule in rules:
converted_rules.append(
self._convert_to_nova_security_group_rule_format(rule))
return converted_rules
def _make_quantum_security_group_dict(self, name, description):
return {'security_group': {'name': name,
'description': description}}
def _make_quantum_security_group_rules_list(self, rules):
new_rules = []
for rule in rules:
new_rule = {}
# nova only supports ingress rules so all rules are ingress.
new_rule['direction'] = "ingress"
new_rule['protocol'] = rule.get('protocol')
# FIXME(arosen) Nova does not expose ethertype on security group
# rules. Therefore, in the case of self referential rules we
# should probably assume they want to allow both IPv4 and IPv6.
# Unfortunately, this would require adding two rules in quantum.
# The reason we do not do this is because when the user using the
# nova api wants to remove the rule we'd have to have some way to
# know that we should delete both of these rules in quantum.
# For now, self referential rules only support IPv4.
if not rule.get('cidr'):
new_rule['ethertype'] = 'IPv4'
else:
new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
new_rule['remote_ip_prefix'] = rule.get('cidr')
new_rule['security_group_id'] = rule.get('parent_group_id')
new_rule['remote_group_id'] = rule.get('group_id')
if rule['from_port'] != -1:
new_rule['port_range_min'] = rule['from_port']
if rule['to_port'] != -1:
new_rule['port_range_max'] = rule['to_port']
new_rules.append(new_rule)
return {'security_group_rules': new_rules}
def remove_rules(self, context, security_group, rule_ids):
quantum = quantumv2.get_client(context)
rule_ids = set(rule_ids)
try:
# The ec2 api allows one to delete multiple security group rules
# at once. Since there is no bulk delete for quantum the best
# thing we can do is delete the rules one by one and hope this
# works.... :/
for rule_id in range(0, len(rule_ids)):
quantum.delete_security_group_rule(rule_ids.pop())
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error unable to delete %s"),
rule_ids)
raise e
def get_rule(self, context, id):
quantum = quantumv2.get_client(context)
try:
rule = quantum.show_security_group_rule(
id).get('security_group_rule')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.debug(_("Quantum security group rule %s not found"), id)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_rule_format(rule)
def get_instances_security_groups_bindings(self, context):
"""Returns a dict(instance_id, [security_groups]) to allow obtaining
all of the instances and their security groups in one shot."""
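        # Result shape, for illustration only (ids below are hypothetical):
        #   {'<instance-device-id>': [{'name': 'default'}, {'name': '<sg-uuid>'}], ...}
        # where the group id stands in for the name whenever the group is unnamed.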
quantum = quantumv2.get_client(context)
ports = quantum.list_ports().get('ports')
security_groups = quantum.list_security_groups().get('security_groups')
security_group_lookup = {}
instances_security_group_bindings = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
for port in ports:
for port_security_group in port.get('security_groups', []):
try:
sg = security_group_lookup[port_security_group]
# name is optional in quantum so if not specified return id
if sg.get('name'):
sg_entry = {'name': sg['name']}
else:
sg_entry = {'name': sg['id']}
instances_security_group_bindings.setdefault(
port['device_id'], []).append(sg_entry)
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return instances_security_group_bindings
def get_instance_security_groups(self, context, instance_id,
instance_uuid=None, detailed=False):
"""Returns the security groups that are associated with an instance.
If detailed is True then it also returns the full details of the
security groups associated with an instance.
"""
quantum = quantumv2.get_client(context)
if instance_uuid:
params = {'device_id': instance_uuid}
else:
params = {'device_id': instance_id}
ports = quantum.list_ports(**params)
security_groups = quantum.list_security_groups().get('security_groups')
security_group_lookup = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
ret = []
for port in ports['ports']:
for security_group in port.get('security_groups', []):
try:
if detailed:
ret.append(self._convert_to_nova_security_group_format(
security_group_lookup[security_group]))
else:
name = security_group_lookup[security_group].get(
'name')
# Since the name is optional for
# quantum security groups
if not name:
name = security_group['id']
ret.append({'name': name})
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return ret
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled')
has_ip = port.get('fixed_ips')
if port_security_enabled and has_ip:
return True
else:
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
for port in ports:
if not self._has_security_group_requirements(port):
LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
" since the port %(port_id)s does not meet security"
" requirements"), {'name': security_group_name,
'instance': instance['uuid'], 'port_id': port['id']})
raise exception.SecurityGroupCannotBeApplied()
if 'security_groups' not in port:
port['security_groups'] = []
port['security_groups'].append(security_group_id)
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Quantum Error:"))
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
found_security_group = False
for port in ports:
try:
port.get('security_groups', []).remove(security_group_id)
except ValueError:
# When removing a security group from an instance the security
# group should be on both ports since it was added this way if
# done through the nova api. In case it is not a 404 is only
# raised if the security group is not found on any of the
# ports on the instance.
continue
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
LOG.exception(_("Quantum Error:"))
                raise
if not found_security_group:
msg = (_("Security group %(security_group_name)s not assocaited "
"with the instance %(instance)s"),
{'security_group_name': security_group_name,
'instance': instance['uuid']})
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
        # Setting to empty list since we do not want to populate this field
# in the nova database if using the quantum driver
instance['security_groups'] = []
| 43.610092 | 79 | 0.592879 |
from oslo.config import cfg
from quantumclient.common import exceptions as q_exc
from quantumclient.quantum import v2_0 as quantumv20
from webob import exc
from nova.compute import api as compute_api
from nova import exception
from nova.network import quantumv2
from nova.network.security_group import security_group_base
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
wrap_check_security_groups_policy = compute_api.policy_decorator(
scope='compute:security_groups')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_dict(name, description)
try:
security_group = quantum.create_security_group(
body).get('security_group')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error creating security group %s"),
name)
if e.status_code == 401:
raise exc.HTTPBadRequest()
raise e
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if rule['port_range_min'] is None:
nova_rule['from_port'] = -1
else:
nova_rule['from_port'] = rule['port_range_min']
if rule['port_range_max'] is None:
nova_rule['to_port'] = -1
else:
nova_rule['to_port'] = rule['port_range_max']
nova_rule['group_id'] = rule['remote_group_id']
nova_rule['cidr'] = rule['remote_ip_prefix']
return nova_rule
def get(self, context, name=None, id=None, map_exception=False):
quantum = quantumv2.get_client(context)
try:
if not id and name:
id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', name)
group = quantum.show_security_group(id).get('security_group')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.debug(_("Quantum security group %s not found"), name)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_format(group)
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
quantum = quantumv2.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
if project:
search_opts['tenant_id'] = project
try:
security_groups = quantum.list_security_groups(**search_opts).get(
'security_groups')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error getting security groups"))
raise e
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
quantum = quantumv2.get_client(context)
try:
quantum.delete_security_group(security_group['id'])
except q_exc.QuantumClientException as e:
if e.status_code == 404:
self.raise_not_found(e.message)
elif e.status_code == 409:
self.raise_invalid_property(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
def add_rules(self, context, id, name, vals):
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_rules_list(vals)
try:
rules = quantum.create_security_group_rule(
body).get('security_group_rules')
except q_exc.QuantumClientException as e:
if e.status_code == 409:
LOG.exception(_("Quantum Error getting security group %s"),
name)
self.raise_not_found(e.message)
else:
LOG.exception(_("Quantum Error:"))
raise e
converted_rules = []
for rule in rules:
converted_rules.append(
self._convert_to_nova_security_group_rule_format(rule))
return converted_rules
def _make_quantum_security_group_dict(self, name, description):
return {'security_group': {'name': name,
'description': description}}
def _make_quantum_security_group_rules_list(self, rules):
new_rules = []
for rule in rules:
new_rule = {}
new_rule['direction'] = "ingress"
new_rule['protocol'] = rule.get('protocol')
# know that we should delete both of these rules in quantum.
# For now, self referential rules only support IPv4.
if not rule.get('cidr'):
new_rule['ethertype'] = 'IPv4'
else:
new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
new_rule['remote_ip_prefix'] = rule.get('cidr')
new_rule['security_group_id'] = rule.get('parent_group_id')
new_rule['remote_group_id'] = rule.get('group_id')
if rule['from_port'] != -1:
new_rule['port_range_min'] = rule['from_port']
if rule['to_port'] != -1:
new_rule['port_range_max'] = rule['to_port']
new_rules.append(new_rule)
return {'security_group_rules': new_rules}
def remove_rules(self, context, security_group, rule_ids):
quantum = quantumv2.get_client(context)
rule_ids = set(rule_ids)
try:
# The ec2 api allows one to delete multiple security group rules
# at once. Since there is no bulk delete for quantum the best
# thing we can do is delete the rules one by one and hope this
# works.... :/
for rule_id in range(0, len(rule_ids)):
quantum.delete_security_group_rule(rule_ids.pop())
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error unable to delete %s"),
rule_ids)
raise e
def get_rule(self, context, id):
quantum = quantumv2.get_client(context)
try:
rule = quantum.show_security_group_rule(
id).get('security_group_rule')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.debug(_("Quantum security group rule %s not found"), id)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_rule_format(rule)
def get_instances_security_groups_bindings(self, context):
quantum = quantumv2.get_client(context)
ports = quantum.list_ports().get('ports')
security_groups = quantum.list_security_groups().get('security_groups')
security_group_lookup = {}
instances_security_group_bindings = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
for port in ports:
for port_security_group in port.get('security_groups', []):
try:
sg = security_group_lookup[port_security_group]
# name is optional in quantum so if not specified return id
if sg.get('name'):
sg_entry = {'name': sg['name']}
else:
sg_entry = {'name': sg['id']}
instances_security_group_bindings.setdefault(
port['device_id'], []).append(sg_entry)
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return instances_security_group_bindings
def get_instance_security_groups(self, context, instance_id,
instance_uuid=None, detailed=False):
quantum = quantumv2.get_client(context)
if instance_uuid:
params = {'device_id': instance_uuid}
else:
params = {'device_id': instance_id}
ports = quantum.list_ports(**params)
security_groups = quantum.list_security_groups().get('security_groups')
security_group_lookup = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
ret = []
for port in ports['ports']:
for security_group in port.get('security_groups', []):
try:
if detailed:
ret.append(self._convert_to_nova_security_group_format(
security_group_lookup[security_group]))
else:
name = security_group_lookup[security_group].get(
'name')
# Since the name is optional for
# quantum security groups
if not name:
name = security_group['id']
ret.append({'name': name})
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return ret
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled')
has_ip = port.get('fixed_ips')
if port_security_enabled and has_ip:
return True
else:
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
for port in ports:
if not self._has_security_group_requirements(port):
LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
" since the port %(port_id)s does not meet security"
" requirements"), {'name': security_group_name,
'instance': instance['uuid'], 'port_id': port['id']})
raise exception.SecurityGroupCannotBeApplied()
if 'security_groups' not in port:
port['security_groups'] = []
port['security_groups'].append(security_group_id)
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Quantum Error:"))
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
found_security_group = False
for port in ports:
try:
port.get('security_groups', []).remove(security_group_id)
except ValueError:
# When removing a security group from an instance the security
# group should be on both ports since it was added this way if
# done through the nova api. In case it is not a 404 is only
# raised if the security group is not found on any of the
# ports on the instance.
continue
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
LOG.exception(_("Quantum Error:"))
                raise
if not found_security_group:
msg = (_("Security group %(security_group_name)s not assocaited "
"with the instance %(instance)s"),
{'security_group_name': security_group_name,
'instance': instance['uuid']})
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
        # Setting to empty list since we do not want to populate this field
# in the nova database if using the quantum driver
instance['security_groups'] = []
| true | true |
1c49db13db2a6633c33e03c70a86b0802f23aadc | 734 | py | Python | dashboard/migrations/0003_auto_20210922_0014.py | scholarsportal/sp_ask_admin_dashboard | 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba | [
"MIT"
] | 1 | 2021-06-30T09:23:07.000Z | 2021-06-30T09:23:07.000Z | dashboard/migrations/0003_auto_20210922_0014.py | scholarsportal/sp_ask_admin_dashboard | 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba | [
"MIT"
] | 9 | 2021-07-02T04:09:23.000Z | 2021-07-06T07:06:38.000Z | dashboard/migrations/0003_auto_20210922_0014.py | scholarsportal/sp_ask_admin_dashboard | 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba | [
"MIT"
] | 1 | 2021-11-30T20:47:56.000Z | 2021-11-30T20:47:56.000Z | # Generated by Django 2.2.19 on 2021-09-22 04:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0002_auto_20210922_0012'),
]
operations = [
migrations.RenameModel(
old_name='Chat',
new_name='ChatLightAssessment',
),
migrations.AddIndex(
model_name='chatlightassessment',
index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_07252a_idx'),
),
migrations.AddIndex(
model_name='chatreferencequestion',
index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_780c55_idx'),
),
]
| 28.230769 | 103 | 0.621253 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0002_auto_20210922_0012'),
]
operations = [
migrations.RenameModel(
old_name='Chat',
new_name='ChatLightAssessment',
),
migrations.AddIndex(
model_name='chatlightassessment',
index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_07252a_idx'),
),
migrations.AddIndex(
model_name='chatreferencequestion',
index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_780c55_idx'),
),
]
| true | true |
1c49ddc3493976e66b9bc28cf93b54ccaa8092da | 1,496 | py | Python | person_sim_shr/turtle_tf_3d/scripts/turtle_tf_broadcaster.py | AssistiveRoboticsUNH/smart-home | 0ace1c619a4a2760640ba80866c5ae96a552243f | [
"MIT"
] | 1 | 2021-09-09T21:24:53.000Z | 2021-09-09T21:24:53.000Z | person_sim_shr/turtle_tf_3d/scripts/turtle_tf_broadcaster.py | AssistiveRoboticsUNH/smart-home | 0ace1c619a4a2760640ba80866c5ae96a552243f | [
"MIT"
] | 2 | 2020-06-25T06:44:59.000Z | 2020-06-25T13:54:36.000Z | person_sim_shr/turtle_tf_3d/scripts/turtle_tf_broadcaster.py | AssistiveRoboticsUNH/smart-home | 0ace1c619a4a2760640ba80866c5ae96a552243f | [
"MIT"
] | 2 | 2020-07-01T02:36:20.000Z | 2020-09-01T07:34:14.000Z | #!/usr/bin/env python
import rospy
import tf
from turtle_tf_3d.get_model_gazebo_pose import GazeboModel
import time
def handle_turtle_pose(pose_msg, robot_name):
br = tf.TransformBroadcaster()
br.sendTransform((pose_msg.position.x,pose_msg.position.y,pose_msg.position.z),
(pose_msg.orientation.x,pose_msg.orientation.y,pose_msg.orientation.z,pose_msg.orientation.w),
rospy.Time.now(),
robot_name,
"/world")
def publisher_of_tf():
rospy.init_node('publisher_of_tf_node', anonymous=True)
robot_name_list = ["turtle1","turtle2"]
gazebo_model_object = GazeboModel(robot_name_list)
for robot_name in robot_name_list:
pose_now = gazebo_model_object.get_model_pose(robot_name)
    # Leave enough time to be sure the Gazebo Model data is initialised
time.sleep(1)
rospy.loginfo("Ready..Starting to Publish TF data now...")
rate = rospy.Rate(5) # 5hz
while not rospy.is_shutdown():
for robot_name in robot_name_list:
pose_now = gazebo_model_object.get_model_pose(robot_name)
if not pose_now:
print "The Pose is not yet"+str(robot_name)+" available...Please try again later"
else:
handle_turtle_pose(pose_now, robot_name)
rate.sleep()
if __name__ == '__main__':
try:
publisher_of_tf()
except rospy.ROSInterruptException:
pass
| 32.521739 | 115 | 0.653075 | import rospy
import tf
from turtle_tf_3d.get_model_gazebo_pose import GazeboModel
import time
def handle_turtle_pose(pose_msg, robot_name):
br = tf.TransformBroadcaster()
br.sendTransform((pose_msg.position.x,pose_msg.position.y,pose_msg.position.z),
(pose_msg.orientation.x,pose_msg.orientation.y,pose_msg.orientation.z,pose_msg.orientation.w),
rospy.Time.now(),
robot_name,
"/world")
def publisher_of_tf():
rospy.init_node('publisher_of_tf_node', anonymous=True)
robot_name_list = ["turtle1","turtle2"]
gazebo_model_object = GazeboModel(robot_name_list)
for robot_name in robot_name_list:
pose_now = gazebo_model_object.get_model_pose(robot_name)
time.sleep(1)
rospy.loginfo("Ready..Starting to Publish TF data now...")
    rate = rospy.Rate(5)
    while not rospy.is_shutdown():
for robot_name in robot_name_list:
pose_now = gazebo_model_object.get_model_pose(robot_name)
if not pose_now:
print "The Pose is not yet"+str(robot_name)+" available...Please try again later"
else:
handle_turtle_pose(pose_now, robot_name)
rate.sleep()
if __name__ == '__main__':
try:
publisher_of_tf()
except rospy.ROSInterruptException:
pass
| false | true |
1c49e00453cdcd61b12b653597c4d0488629ad01 | 14,791 | py | Python | tests/tests/test_compressor.py | karlwnw/django-pipeline | eeb92660c18d969b955e0115ab909a64fb16d92e | [
"MIT"
] | 598 | 2015-12-18T01:25:23.000Z | 2022-03-31T13:57:01.000Z | tests/tests/test_compressor.py | karlwnw/django-pipeline | eeb92660c18d969b955e0115ab909a64fb16d92e | [
"MIT"
] | 288 | 2015-12-18T01:30:20.000Z | 2022-02-22T16:02:12.000Z | tests/tests/test_compressor.py | karlwnw/django-pipeline | eeb92660c18d969b955e0115ab909a64fb16d92e | [
"MIT"
] | 178 | 2015-12-20T06:58:57.000Z | 2022-03-04T21:53:43.000Z | import base64
import io
import os
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch # noqa
from unittest import skipIf, skipUnless
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from pipeline.compressors import (
Compressor, TEMPLATE_FUNC, SubProcessCompressor)
from pipeline.compressors.yuglify import YuglifyCompressor
from pipeline.collector import default_collector
from tests.utils import _, pipeline_settings
@pipeline_settings(
CSS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor',
JS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor')
class CompressorTest(TestCase):
def setUp(self):
self.maxDiff = None
self.compressor = Compressor()
default_collector.collect()
def test_js_compressor_class(self):
self.assertEqual(self.compressor.js_compressor, YuglifyCompressor)
def test_css_compressor_class(self):
self.assertEqual(self.compressor.css_compressor, YuglifyCompressor)
def test_concatenate_and_rewrite(self):
css = self.compressor.concatenate_and_rewrite([
_('pipeline/css/first.css'),
_('pipeline/css/second.css')
], 'css/screen.css')
expected = """.concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n"""
self.assertEqual(expected, css)
def test_concatenate(self):
js = self.compressor.concatenate([
_('pipeline/js/first.js'),
_('pipeline/js/second.js')
])
expected = """(function() {\n window.concat = function() {\n console.log(arguments);\n }\n}()) // No semicolon\n\n;(function() {\n window.cat = function() {\n console.log("hello world");\n }\n}());\n"""
self.assertEqual(expected, js)
@patch.object(base64, 'b64encode')
def test_encoded_content(self, mock):
self.compressor.asset_contents.clear()
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertTrue(mock.called)
mock.reset_mock()
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertFalse(mock.called)
def test_encoded_content_output(self):
self.compressor.asset_contents.clear()
encoded = self.compressor.encoded_content(_('pipeline/images/arrow.png'))
expected = ('iVBORw0KGgoAAAANSUhEUgAAAAkAAAAGCAYAAAARx7TFAAAAMk'
'lEQVR42oXKwQkAMAxC0Q7rEk5voSEepCHC9/SOpLV3JPULgArV'
'RtDIMEEiQ4NECRNdciCfK3K3wvEAAAAASUVORK5CYII=')
self.assertEqual(encoded, expected)
def test_relative_path(self):
relative_path = self.compressor.relative_path("images/sprite.png", 'css/screen.css')
self.assertEqual(relative_path, '../images/sprite.png')
def test_base_path(self):
base_path = self.compressor.base_path([
_('js/templates/form.jst'), _('js/templates/field.jst')
])
self.assertEqual(base_path, _('js/templates'))
def test_absolute_path(self):
absolute_path = self.compressor.absolute_path(
'../../images/sprite.png', 'css/plugins/')
self.assertEqual(absolute_path, 'images/sprite.png')
absolute_path = self.compressor.absolute_path(
'/images/sprite.png', 'css/plugins/')
self.assertEqual(absolute_path, '/images/sprite.png')
def test_template_name(self):
name = self.compressor.template_name(
'templates/photo/detail.jst', 'templates/')
self.assertEqual(name, 'photo_detail')
name = self.compressor.template_name('templates/photo_edit.jst', '')
self.assertEqual(name, 'photo_edit')
name = self.compressor.template_name(
'templates\photo\detail.jst', 'templates\\')
self.assertEqual(name, 'photo_detail')
@pipeline_settings(TEMPLATE_SEPARATOR='/')
def test_template_name_separator(self):
name = self.compressor.template_name(
'templates/photo/detail.jst', 'templates/')
self.assertEqual(name, 'photo/detail')
name = self.compressor.template_name('templates/photo_edit.jst', '')
self.assertEqual(name, 'photo_edit')
name = self.compressor.template_name(
'templates\photo\detail.jst', 'templates\\')
self.assertEqual(name, 'photo/detail')
def test_compile_templates(self):
templates = self.compressor.compile_templates([_('pipeline/templates/photo/list.jst')])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
templates = self.compressor.compile_templates([
_('pipeline/templates/video/detail.jst'),
_('pipeline/templates/photo/detail.jst')
])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
def test_embeddable(self):
self.assertFalse(self.compressor.embeddable(_('pipeline/images/sprite.png'), None))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.png'), 'datauri'))
self.assertTrue(self.compressor.embeddable(_('pipeline/images/embed/arrow.png'), 'datauri'))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.dat'), 'datauri'))
def test_construct_asset_path(self):
asset_path = self.compressor.construct_asset_path(
"../../images/sprite.png", "css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "../images/sprite.png")
asset_path = self.compressor.construct_asset_path(
"/images/sprite.png", "css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "/images/sprite.png")
def test_url_rewrite(self):
output = self.compressor.concatenate_and_rewrite([
_('pipeline/css/urls.css'),
], 'css/screen.css')
self.assertEqual(""".embedded-url-svg {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E");
}
@font-face {
font-family: 'Pipeline';
src: url(../pipeline/fonts/pipeline.eot);
src: url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype');
src: local('☺'), url(../pipeline/fonts/pipeline.woff) format('woff'), url(../pipeline/fonts/pipeline.ttf) format('truetype'), url(../pipeline/fonts/pipeline.svg#IyfZbseF) format('svg');
font-weight: normal;
font-style: normal;
}
.relative-url {
background-image: url(../pipeline/images/sprite-buttons.png);
}
.relative-url-querystring {
background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar);
}
.absolute-url {
background-image: url(/images/sprite-buttons.png);
}
.absolute-full-url {
background-image: url(http://localhost/images/sprite-buttons.png);
}
.no-protocol-url {
background-image: url(//images/sprite-buttons.png);
}
.anchor-tag-url {
background-image: url(#image-gradient);
}
@font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');}
""", output)
def test_url_rewrite_data_uri(self):
output = self.compressor.concatenate_and_rewrite([
_('pipeline/css/nested/nested.css'),
], 'pipeline/screen.css')
self.assertEqual(""".data-url {
background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E);
}
.data-url-quoted {
background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E');
}
""", output)
@skipIf(sys.platform.startswith("win"), "requires posix platform")
def test_compressor_subprocess_unicode(self):
path = os.path.dirname(os.path.dirname(__file__))
content = io.open(path + '/assets/css/unicode.css', encoding="utf-8").read()
output = SubProcessCompressor(False).execute_command(('cat',), content)
self.assertEqual(""".some_class {
// Some unicode
content: "áéíóú";
}
""", output)
def tearDown(self):
default_collector.clear()
class CompressorImplementationTest(TestCase):
maxDiff = None
def setUp(self):
self.compressor = Compressor()
default_collector.collect(RequestFactory().get('/'))
def tearDown(self):
default_collector.clear()
def _test_compressor(self, compressor_cls, compress_type, expected_file):
override_settings = {
("%s_COMPRESSOR" % compress_type.upper()): compressor_cls,
}
with pipeline_settings(**override_settings):
if compress_type == 'js':
result = self.compressor.compress_js(
[_('pipeline/js/first.js'), _('pipeline/js/second.js')])
else:
result = self.compressor.compress_css(
[_('pipeline/css/first.css'), _('pipeline/css/second.css')],
os.path.join('pipeline', 'css', os.path.basename(expected_file)))
with self.compressor.storage.open(expected_file, 'r') as f:
expected = f.read()
self.assertEqual(result, expected)
def test_jsmin(self):
self._test_compressor('pipeline.compressors.jsmin.JSMinCompressor',
'js', 'pipeline/compressors/jsmin.js')
def test_slimit(self):
self._test_compressor('pipeline.compressors.slimit.SlimItCompressor',
'js', 'pipeline/compressors/slimit.js')
def test_csshtmljsminify(self):
self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor',
'css', 'pipeline/compressors/csshtmljsminify.css')
self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor',
'js', 'pipeline/compressors/csshtmljsminify.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_uglifyjs(self):
self._test_compressor('pipeline.compressors.uglifyjs.UglifyJSCompressor',
'js', 'pipeline/compressors/uglifyjs.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_terser(self):
self._test_compressor('pipeline.compressors.terser.TerserCompressor',
'js', 'pipeline/compressors/terser.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_yuglify(self):
self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor',
'css', 'pipeline/compressors/yuglify.css')
self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor',
'js', 'pipeline/compressors/yuglify.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_cssmin(self):
self._test_compressor('pipeline.compressors.cssmin.CSSMinCompressor',
'css', 'pipeline/compressors/cssmin.css')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_closure(self):
self._test_compressor('pipeline.compressors.closure.ClosureCompressor',
'js', 'pipeline/compressors/closure.js')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_yui_js(self):
self._test_compressor('pipeline.compressors.yui.YUICompressor',
'js', 'pipeline/compressors/yui.js')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_yui_css(self):
self._test_compressor('pipeline.compressors.yui.YUICompressor',
'css', 'pipeline/compressors/yui.css')
@skipUnless(settings.HAS_CSSTIDY, "requires csstidy")
def test_csstidy(self):
self._test_compressor('pipeline.compressors.csstidy.CSSTidyCompressor',
'css', 'pipeline/compressors/csstidy.css')
| 51.898246 | 1,213 | 0.685687 | import base64
import io
import os
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from unittest import skipIf, skipUnless
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from pipeline.compressors import (
Compressor, TEMPLATE_FUNC, SubProcessCompressor)
from pipeline.compressors.yuglify import YuglifyCompressor
from pipeline.collector import default_collector
from tests.utils import _, pipeline_settings
@pipeline_settings(
CSS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor',
JS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor')
class CompressorTest(TestCase):
def setUp(self):
self.maxDiff = None
self.compressor = Compressor()
default_collector.collect()
def test_js_compressor_class(self):
self.assertEqual(self.compressor.js_compressor, YuglifyCompressor)
def test_css_compressor_class(self):
self.assertEqual(self.compressor.css_compressor, YuglifyCompressor)
def test_concatenate_and_rewrite(self):
css = self.compressor.concatenate_and_rewrite([
_('pipeline/css/first.css'),
_('pipeline/css/second.css')
], 'css/screen.css')
expected = """.concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n"""
self.assertEqual(expected, css)
def test_concatenate(self):
js = self.compressor.concatenate([
_('pipeline/js/first.js'),
_('pipeline/js/second.js')
])
expected = """(function() {\n window.concat = function() {\n console.log(arguments);\n }\n}()) // No semicolon\n\n;(function() {\n window.cat = function() {\n console.log("hello world");\n }\n}());\n"""
self.assertEqual(expected, js)
@patch.object(base64, 'b64encode')
def test_encoded_content(self, mock):
self.compressor.asset_contents.clear()
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertTrue(mock.called)
mock.reset_mock()
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertFalse(mock.called)
def test_encoded_content_output(self):
self.compressor.asset_contents.clear()
encoded = self.compressor.encoded_content(_('pipeline/images/arrow.png'))
expected = ('iVBORw0KGgoAAAANSUhEUgAAAAkAAAAGCAYAAAARx7TFAAAAMk'
'lEQVR42oXKwQkAMAxC0Q7rEk5voSEepCHC9/SOpLV3JPULgArV'
'RtDIMEEiQ4NECRNdciCfK3K3wvEAAAAASUVORK5CYII=')
self.assertEqual(encoded, expected)
def test_relative_path(self):
relative_path = self.compressor.relative_path("images/sprite.png", 'css/screen.css')
self.assertEqual(relative_path, '../images/sprite.png')
def test_base_path(self):
base_path = self.compressor.base_path([
_('js/templates/form.jst'), _('js/templates/field.jst')
])
self.assertEqual(base_path, _('js/templates'))
def test_absolute_path(self):
absolute_path = self.compressor.absolute_path(
'../../images/sprite.png', 'css/plugins/')
self.assertEqual(absolute_path, 'images/sprite.png')
absolute_path = self.compressor.absolute_path(
'/images/sprite.png', 'css/plugins/')
self.assertEqual(absolute_path, '/images/sprite.png')
def test_template_name(self):
name = self.compressor.template_name(
'templates/photo/detail.jst', 'templates/')
self.assertEqual(name, 'photo_detail')
name = self.compressor.template_name('templates/photo_edit.jst', '')
self.assertEqual(name, 'photo_edit')
name = self.compressor.template_name(
'templates\photo\detail.jst', 'templates\\')
self.assertEqual(name, 'photo_detail')
@pipeline_settings(TEMPLATE_SEPARATOR='/')
def test_template_name_separator(self):
name = self.compressor.template_name(
'templates/photo/detail.jst', 'templates/')
self.assertEqual(name, 'photo/detail')
name = self.compressor.template_name('templates/photo_edit.jst', '')
self.assertEqual(name, 'photo_edit')
name = self.compressor.template_name(
'templates\photo\detail.jst', 'templates\\')
self.assertEqual(name, 'photo/detail')
def test_compile_templates(self):
templates = self.compressor.compile_templates([_('pipeline/templates/photo/list.jst')])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
templates = self.compressor.compile_templates([
_('pipeline/templates/video/detail.jst'),
_('pipeline/templates/photo/detail.jst')
])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
def test_embeddable(self):
self.assertFalse(self.compressor.embeddable(_('pipeline/images/sprite.png'), None))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.png'), 'datauri'))
self.assertTrue(self.compressor.embeddable(_('pipeline/images/embed/arrow.png'), 'datauri'))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.dat'), 'datauri'))
def test_construct_asset_path(self):
asset_path = self.compressor.construct_asset_path(
"../../images/sprite.png", "css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "../images/sprite.png")
asset_path = self.compressor.construct_asset_path(
"/images/sprite.png", "css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "/images/sprite.png")
def test_url_rewrite(self):
output = self.compressor.concatenate_and_rewrite([
_('pipeline/css/urls.css'),
], 'css/screen.css')
self.assertEqual(""".embedded-url-svg {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E");
}
@font-face {
font-family: 'Pipeline';
src: url(../pipeline/fonts/pipeline.eot);
src: url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype');
src: local('☺'), url(../pipeline/fonts/pipeline.woff) format('woff'), url(../pipeline/fonts/pipeline.ttf) format('truetype'), url(../pipeline/fonts/pipeline.svg#IyfZbseF) format('svg');
font-weight: normal;
font-style: normal;
}
.relative-url {
background-image: url(../pipeline/images/sprite-buttons.png);
}
.relative-url-querystring {
background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar);
}
.absolute-url {
background-image: url(/images/sprite-buttons.png);
}
.absolute-full-url {
background-image: url(http://localhost/images/sprite-buttons.png);
}
.no-protocol-url {
background-image: url(//images/sprite-buttons.png);
}
.anchor-tag-url {
background-image: url(#image-gradient);
}
@font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');}
""", output)
def test_url_rewrite_data_uri(self):
output = self.compressor.concatenate_and_rewrite([
_('pipeline/css/nested/nested.css'),
], 'pipeline/screen.css')
self.assertEqual(""".data-url {
background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E);
}
.data-url-quoted {
background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E');
}
""", output)
@skipIf(sys.platform.startswith("win"), "requires posix platform")
def test_compressor_subprocess_unicode(self):
path = os.path.dirname(os.path.dirname(__file__))
content = io.open(path + '/assets/css/unicode.css', encoding="utf-8").read()
output = SubProcessCompressor(False).execute_command(('cat',), content)
self.assertEqual(""".some_class {
// Some unicode
content: "áéíóú";
}
""", output)
def tearDown(self):
default_collector.clear()
class CompressorImplementationTest(TestCase):
maxDiff = None
def setUp(self):
self.compressor = Compressor()
default_collector.collect(RequestFactory().get('/'))
def tearDown(self):
default_collector.clear()
def _test_compressor(self, compressor_cls, compress_type, expected_file):
override_settings = {
("%s_COMPRESSOR" % compress_type.upper()): compressor_cls,
}
with pipeline_settings(**override_settings):
if compress_type == 'js':
result = self.compressor.compress_js(
[_('pipeline/js/first.js'), _('pipeline/js/second.js')])
else:
result = self.compressor.compress_css(
[_('pipeline/css/first.css'), _('pipeline/css/second.css')],
os.path.join('pipeline', 'css', os.path.basename(expected_file)))
with self.compressor.storage.open(expected_file, 'r') as f:
expected = f.read()
self.assertEqual(result, expected)
def test_jsmin(self):
self._test_compressor('pipeline.compressors.jsmin.JSMinCompressor',
'js', 'pipeline/compressors/jsmin.js')
def test_slimit(self):
self._test_compressor('pipeline.compressors.slimit.SlimItCompressor',
'js', 'pipeline/compressors/slimit.js')
def test_csshtmljsminify(self):
self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor',
'css', 'pipeline/compressors/csshtmljsminify.css')
self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor',
'js', 'pipeline/compressors/csshtmljsminify.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_uglifyjs(self):
self._test_compressor('pipeline.compressors.uglifyjs.UglifyJSCompressor',
'js', 'pipeline/compressors/uglifyjs.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_terser(self):
self._test_compressor('pipeline.compressors.terser.TerserCompressor',
'js', 'pipeline/compressors/terser.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_yuglify(self):
self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor',
'css', 'pipeline/compressors/yuglify.css')
self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor',
'js', 'pipeline/compressors/yuglify.js')
@skipUnless(settings.HAS_NODE, "requires node")
def test_cssmin(self):
self._test_compressor('pipeline.compressors.cssmin.CSSMinCompressor',
'css', 'pipeline/compressors/cssmin.css')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_closure(self):
self._test_compressor('pipeline.compressors.closure.ClosureCompressor',
'js', 'pipeline/compressors/closure.js')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_yui_js(self):
self._test_compressor('pipeline.compressors.yui.YUICompressor',
'js', 'pipeline/compressors/yui.js')
@skipUnless(settings.HAS_NODE, "requires node")
@skipUnless(settings.HAS_JAVA, "requires java")
def test_yui_css(self):
self._test_compressor('pipeline.compressors.yui.YUICompressor',
'css', 'pipeline/compressors/yui.css')
@skipUnless(settings.HAS_CSSTIDY, "requires csstidy")
def test_csstidy(self):
self._test_compressor('pipeline.compressors.csstidy.CSSTidyCompressor',
'css', 'pipeline/compressors/csstidy.css')
| true | true |
1c49e18ac0e8dba6da218abdc4c6d3a737ca47f4 | 34,988 | py | Python | flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py | shafqatshad/AnmieRecommenderSystem | f58d6ab2b3614aa81208ec844ef99963c988c69d | [
"Apache-2.0"
] | null | null | null | flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py | shafqatshad/AnmieRecommenderSystem | f58d6ab2b3614aa81208ec844ef99963c988c69d | [
"Apache-2.0"
] | null | null | null | flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py | shafqatshad/AnmieRecommenderSystem | f58d6ab2b3614aa81208ec844ef99963c988c69d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import gevent
from gevent import monkey
import os
import re
import unittest
import socket
from time import time
import traceback
import gevent.socket as gevent_socket
import gevent.testing as greentest
from gevent.testing import util
from gevent.testing.six import xrange
from gevent.testing import flaky
from gevent.testing.skipping import skipWithoutExternalNetwork
resolver = gevent.get_hub().resolver
util.debug('Resolver: %s', resolver)
if getattr(resolver, 'pool', None) is not None:
resolver.pool.size = 1
from gevent.testing.sysinfo import RESOLVER_NOT_SYSTEM
from gevent.testing.sysinfo import RESOLVER_DNSPYTHON
from gevent.testing.sysinfo import RESOLVER_ARES
from gevent.testing.sysinfo import PY2
from gevent.testing.sysinfo import PYPY
import gevent.testing.timing
assert gevent_socket.gaierror is socket.gaierror
assert gevent_socket.error is socket.error
RUN_ALL_HOST_TESTS = os.getenv('GEVENTTEST_RUN_ALL_ETC_HOST_TESTS', '')
def add(klass, hostname, name=None,
skip=None, skip_reason=None):
call = callable(hostname)
def _setattr(k, n, func):
if skip:
func = greentest.skipIf(skip, skip_reason,)(func)
if not hasattr(k, n):
setattr(k, n, func)
if name is None:
if call:
name = hostname.__name__
else:
name = re.sub(r'[^\w]+', '_', repr(hostname))
assert name, repr(hostname)
def test_getaddrinfo_http(self):
x = hostname() if call else hostname
self._test('getaddrinfo', x, 'http')
test_getaddrinfo_http.__name__ = 'test_%s_getaddrinfo_http' % name
_setattr(klass, test_getaddrinfo_http.__name__, test_getaddrinfo_http)
def test_gethostbyname(self):
x = hostname() if call else hostname
ipaddr = self._test('gethostbyname', x)
if not isinstance(ipaddr, Exception):
self._test('gethostbyaddr', ipaddr)
test_gethostbyname.__name__ = 'test_%s_gethostbyname' % name
_setattr(klass, test_gethostbyname.__name__, test_gethostbyname)
def test3(self):
x = hostname() if call else hostname
self._test('gethostbyname_ex', x)
test3.__name__ = 'test_%s_gethostbyname_ex' % name
_setattr(klass, test3.__name__, test3)
def test4(self):
x = hostname() if call else hostname
self._test('gethostbyaddr', x)
test4.__name__ = 'test_%s_gethostbyaddr' % name
_setattr(klass, test4.__name__, test4)
def test5(self):
x = hostname() if call else hostname
self._test('getnameinfo', (x, 80), 0)
test5.__name__ = 'test_%s_getnameinfo' % name
_setattr(klass, test5.__name__, test5)
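# Illustrative note on the generated names (derived from the code above, not a
# new behaviour): add(TestInternational, u'президент.рф', 'russian') attaches
# test_russian_getaddrinfo_http, test_russian_gethostbyname,
# test_russian_gethostbyname_ex, test_russian_gethostbyaddr and
# test_russian_getnameinfo to the class; when *name* is omitted it is built
# from re.sub(r'[^\w]+', '_', repr(hostname)).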
@skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo")
class TestCase(greentest.TestCase):
maxDiff = None
__timeout__ = 30
switch_expected = None
TRACE = not util.QUIET and os.getenv('GEVENT_DEBUG', '') == 'trace'
verbose_dns = TRACE
def trace(self, message, *args, **kwargs):
if self.TRACE:
util.debug(message, *args, **kwargs)
# Things that the stdlib should never raise and neither should we;
# these indicate bugs in our code and we want to raise them.
REAL_ERRORS = (AttributeError, ValueError, NameError)
def __run_resolver(self, function, args):
try:
result = function(*args)
assert not isinstance(result, BaseException), repr(result)
return result
except self.REAL_ERRORS:
raise
except Exception as ex: # pylint:disable=broad-except
if self.TRACE:
traceback.print_exc()
return ex
def __trace_call(self, result, runtime, function, *args):
util.debug(self.__format_call(function, args))
self.__trace_fresult(result, runtime)
def __format_call(self, function, args):
args = repr(args)
if args.endswith(',)'):
args = args[:-2] + ')'
try:
module = function.__module__.replace('gevent._socketcommon', 'gevent')
name = function.__name__
return '%s:%s%s' % (module, name, args)
except AttributeError:
return function + args
def __trace_fresult(self, result, seconds):
if isinstance(result, Exception):
msg = ' -=> raised %r' % (result, )
else:
msg = ' -=> returned %r' % (result, )
time_ms = ' %.2fms' % (seconds * 1000.0, )
space = 80 - len(msg) - len(time_ms)
if space > 0:
space = ' ' * space
else:
space = ''
util.debug(msg + space + time_ms)
if not TRACE:
def run_resolver(self, function, func_args):
now = time()
return self.__run_resolver(function, func_args), time() - now
else:
def run_resolver(self, function, func_args):
self.trace(self.__format_call(function, func_args))
delta = time()
result = self.__run_resolver(function, func_args)
delta = time() - delta
self.__trace_fresult(result, delta)
return result, delta
def setUp(self):
super(TestCase, self).setUp()
if not self.verbose_dns:
# Silence the default reporting of errors from the ThreadPool,
# we handle those here.
gevent.get_hub().exception_stream = None
def tearDown(self):
if not self.verbose_dns:
try:
del gevent.get_hub().exception_stream
except AttributeError:
pass # Happens under leak tests
super(TestCase, self).tearDown()
def should_log_results(self, result1, result2):
if not self.verbose_dns:
return False
if isinstance(result1, BaseException) and isinstance(result2, BaseException):
return type(result1) is not type(result2)
return repr(result1) != repr(result2)
def _test(self, func_name, *args):
"""
Runs the function *func_name* with *args* and compares gevent and the system.
Returns the gevent result.
"""
gevent_func = getattr(gevent_socket, func_name)
real_func = monkey.get_original('socket', func_name)
tester = getattr(self, '_run_test_' + func_name, self._run_test_generic)
result = tester(func_name, real_func, gevent_func, args)
_real_result, time_real, gevent_result, time_gevent = result
if self.verbose_dns and time_gevent > time_real + 0.02 and time_gevent > 0.03:
msg = 'gevent:%s%s took %dms versus %dms stdlib' % (
func_name, args, time_gevent * 1000.0, time_real * 1000.0)
if time_gevent > time_real + 1:
word = 'VERY'
else:
word = 'quite'
util.log('\nWARNING: %s slow: %s', word, msg, color='warning')
return gevent_result
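    # Example call (hypothetical values): self._test('gethostbyname', 'localhost')
    # resolves 'localhost' with both the gevent resolver and the unpatched
    # stdlib resolver, asserts the normalized results agree, and returns the
    # gevent result for further checks.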
def _run_test_generic(self, func_name, real_func, gevent_func, func_args):
real_result, time_real = self.run_resolver(real_func, func_args)
gevent_result, time_gevent = self.run_resolver(gevent_func, func_args)
if util.QUIET and self.should_log_results(real_result, gevent_result):
util.log('')
self.__trace_call(real_result, time_real, real_func, func_args)
self.__trace_call(gevent_result, time_gevent, gevent_func, func_args)
self.assertEqualResults(real_result, gevent_result, func_name)
return real_result, time_real, gevent_result, time_gevent
def _normalize_result(self, result, func_name):
norm_name = '_normalize_result_' + func_name
if hasattr(self, norm_name):
return getattr(self, norm_name)(result)
return result
NORMALIZE_GAI_IGNORE_CANONICAL_NAME = RESOLVER_ARES # It tends to return them even when not asked for
if not RESOLVER_NOT_SYSTEM:
def _normalize_result_getaddrinfo(self, result):
return result
def _normalize_result_gethostbyname_ex(self, result):
return result
else:
def _normalize_result_gethostbyname_ex(self, result):
# Often the second and third part of the tuple (hostname, aliaslist, ipaddrlist)
# can be in different orders if we're hitting different servers,
# or using the native and ares resolvers due to load-balancing techniques.
# We sort them.
if isinstance(result, BaseException):
return result
# result[1].sort() # we wind up discarding this
            # On Py2 in test_russian_gethostbyname_ex, this
            # is actually an integer, for some reason. In TestLocalhost.test__ip6_localhost,
# the result isn't this long (maybe an error?).
try:
result[2].sort()
except AttributeError:
pass
except IndexError:
return result
# On some systems, a random alias is found in the aliaslist
# by the system resolver, but not by cares, and vice versa. We deem the aliaslist
# unimportant and discard it.
# On some systems (Travis CI), the ipaddrlist for 'localhost' can come back
# with two entries 10.28.141.171 (presumably two interfaces?) for c-ares
ips = result[2]
if ips == ['10.28.141.171', '10.28.141.171']:
ips = ['10.28.141.171']
# On some systems, the hostname can get caps
return (result[0].lower(), [], ips)
def _normalize_result_getaddrinfo(self, result):
# Result is a list
# (family, socktype, proto, canonname, sockaddr)
# e.g.,
# (AF_INET, SOCK_STREAM, IPPROTO_TCP, 'readthedocs.io', (10.28.141.171, 80))
if isinstance(result, BaseException):
return result
# On Python 3, the builtin resolver can return SOCK_RAW results, but
# c-ares doesn't do that. So we remove those if we find them.
# Likewise, on certain Linux systems, even on Python 2, IPPROTO_SCTP (132)
# results may be returned --- but that may not even have a constant in the
# socket module! So to be safe, we strip out anything that's not
# SOCK_STREAM or SOCK_DGRAM
if isinstance(result, list):
result = [
x
for x in result
if x[1] in (socket.SOCK_STREAM, socket.SOCK_DGRAM)
and x[2] in (socket.IPPROTO_TCP, socket.IPPROTO_UDP)
]
if self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME:
result = [
(family, kind, proto, '', addr)
for family, kind, proto, _, addr
in result
]
if isinstance(result, list):
result.sort()
return result
def _normalize_result_getnameinfo(self, result):
return result
NORMALIZE_GHBA_IGNORE_ALIAS = False
def _normalize_result_gethostbyaddr(self, result):
if not RESOLVER_NOT_SYSTEM:
return result
if self.NORMALIZE_GHBA_IGNORE_ALIAS and isinstance(result, tuple):
# On some systems, a random alias is found in the aliaslist
# by the system resolver, but not by cares and vice versa. This is *probably* only the
# case for localhost or things otherwise in /etc/hosts. We deem the aliaslist
# unimportant and discard it.
return (result[0], [], result[2])
return result
def _compare_exceptions_strict(self, real_result, gevent_result, func_name):
if repr(real_result) == repr(gevent_result):
# Catch things like `OverflowError('port must be 0-65535.',)```
return
msg = (func_name, 'system:', repr(real_result), 'gevent:', repr(gevent_result))
self.assertIs(type(gevent_result), type(real_result), msg)
if isinstance(real_result, TypeError):
return
if PYPY and isinstance(real_result, socket.herror):
# PyPy doesn't do errno or multiple arguments in herror;
# it just puts a string like 'host lookup failed: <thehost>';
# it must be doing that manually.
return
self.assertEqual(real_result.args, gevent_result.args, msg)
if hasattr(real_result, 'errno'):
self.assertEqual(real_result.errno, gevent_result.errno)
def _compare_exceptions_lenient(self, real_result, gevent_result, func_name):
try:
self._compare_exceptions_strict(real_result, gevent_result, func_name)
except AssertionError:
# Allow raising different things in a few rare cases.
if (
func_name not in (
'getaddrinfo',
'gethostbyaddr',
'gethostbyname',
'gethostbyname_ex',
'getnameinfo',
)
or type(real_result) not in (socket.herror, socket.gaierror)
or type(gevent_result) not in (socket.herror, socket.gaierror, socket.error)
):
raise
util.log('WARNING: error type mismatch for %s: %r (gevent) != %r (stdlib)',
func_name,
gevent_result, real_result,
color='warning')
_compare_exceptions = _compare_exceptions_lenient if RESOLVER_NOT_SYSTEM else _compare_exceptions_strict
def _compare_results(self, real_result, gevent_result, func_name):
if real_result == gevent_result:
return True
compare_func = getattr(self, '_compare_results_' + func_name,
self._generic_compare_results)
return compare_func(real_result, gevent_result, func_name)
def _generic_compare_results(self, real_result, gevent_result, func_name):
try:
if len(real_result) != len(gevent_result):
return False
except TypeError:
return False
return all(self._compare_results(x, y, func_name)
for (x, y)
in zip(real_result, gevent_result))
def _compare_results_getaddrinfo(self, real_result, gevent_result, func_name):
# On some systems, we find more results with
# one resolver than we do with the other resolver.
# So as long as they have some subset in common,
# we'll take it.
if not set(real_result).isdisjoint(set(gevent_result)):
return True
return self._generic_compare_results(real_result, gevent_result, func_name)
def _compare_address_strings(self, a, b):
# IPv6 address from different requests might be different
a_segments = a.count(':')
b_segments = b.count(':')
if a_segments and b_segments:
if a_segments == b_segments and a_segments in (4, 5, 6, 7):
return True
if a.rstrip(':').startswith(b.rstrip(':')) or b.rstrip(':').startswith(a.rstrip(':')):
return True
if a_segments >= 2 and b_segments >= 2 and a.split(':')[:2] == b.split(':')[:2]:
return True
return a.split('.', 1)[-1] == b.split('.', 1)[-1]
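    # Illustrative comparisons (made-up addresses): IPv4 strings only need the
    # last three octets to match, so
    #   self._compare_address_strings('10.1.2.3', '172.1.2.3')  -> True
    #   self._compare_address_strings('10.0.0.1', '10.0.0.2')   -> False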
def _compare_results_gethostbyname(self, real_result, gevent_result, _func_name):
# Both strings.
return self._compare_address_strings(real_result, gevent_result)
def _compare_results_gethostbyname_ex(self, real_result, gevent_result, _func_name):
# Results are IPv4 only:
# (hostname, [aliaslist], [ipaddrlist])
# As for getaddrinfo, we'll just check the ipaddrlist has something in common.
return not set(real_result[2]).isdisjoint(set(gevent_result[2]))
def assertEqualResults(self, real_result, gevent_result, func_name):
errors = (
OverflowError,
TypeError,
UnicodeError,
socket.error,
socket.gaierror,
socket.herror,
)
if isinstance(real_result, errors) and isinstance(gevent_result, errors):
self._compare_exceptions(real_result, gevent_result, func_name)
return
real_result = self._normalize_result(real_result, func_name)
gevent_result = self._normalize_result(gevent_result, func_name)
if self._compare_results(real_result, gevent_result, func_name):
return
# If we're using a different resolver, allow the real resolver to generate an
# error that the gevent resolver actually gets an answer to.
if (
RESOLVER_NOT_SYSTEM
and isinstance(real_result, errors)
and not isinstance(gevent_result, errors)
):
return
# On PyPy, socket.getnameinfo() can produce results even when the hostname resolves to
# multiple addresses, like www.gevent.org does. DNSPython (and c-ares?) don't do that,
# they refuse to pick a name and raise ``socket.error``
if (
RESOLVER_NOT_SYSTEM
and PYPY
and func_name == 'getnameinfo'
and isinstance(gevent_result, socket.error)
and not isinstance(real_result, socket.error)
):
return
# From 2.7 on, assertEqual does a better job highlighting the results than we would
# because it calls assertSequenceEqual, which highlights the exact
# difference in the tuple
self.assertEqual(real_result, gevent_result)
class TestTypeError(TestCase):
pass
add(TestTypeError, None)
add(TestTypeError, 25)
class TestHostname(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
def __normalize_name(self, result):
if (RESOLVER_ARES or RESOLVER_DNSPYTHON) and isinstance(result, tuple):
# The system resolver can return the FQDN, in the first result,
# when given certain configurations. But c-ares and dnspython
# do not.
name = result[0]
name = name.split('.', 1)[0]
result = (name,) + result[1:]
return result
def _normalize_result_gethostbyaddr(self, result):
result = TestCase._normalize_result_gethostbyaddr(self, result)
return self.__normalize_name(result)
def _normalize_result_getnameinfo(self, result):
result = TestCase._normalize_result_getnameinfo(self, result)
if PY2:
# Not sure why we only saw this on Python 2
result = self.__normalize_name(result)
return result
add(
TestHostname,
socket.gethostname,
skip=greentest.RUNNING_ON_TRAVIS and greentest.RESOLVER_NOT_SYSTEM,
skip_reason=("Sometimes get a different result for getaddrinfo "
"with dnspython; c-ares produces different results for "
"localhost on Travis beginning Sept 2019")
)
class TestLocalhost(TestCase):
# certain tests in test_patched_socket.py only work if getaddrinfo('localhost') does not switch
# (e.g. NetworkConnectionAttributesTest.testSourceAddress)
#switch_expected = False
# XXX: The above has been commented out for some time. Apparently this isn't the case
# anymore.
def _normalize_result_getaddrinfo(self, result):
if RESOLVER_NOT_SYSTEM:
# We see that some impls (OS X) return extra results
# like DGRAM that ares does not.
return ()
return super(TestLocalhost, self)._normalize_result_getaddrinfo(result)
NORMALIZE_GHBA_IGNORE_ALIAS = True
if greentest.RUNNING_ON_TRAVIS and greentest.PY2 and RESOLVER_NOT_SYSTEM:
def _normalize_result_gethostbyaddr(self, result):
# Beginning in November 2017 after an upgrade to Travis,
# we started seeing ares return ::1 for localhost, but
# the system resolver is still returning 10.28.141.171 under Python 2
result = super(TestLocalhost, self)._normalize_result_gethostbyaddr(result)
if isinstance(result, tuple):
result = (result[0], result[1], ['10.28.141.171'])
return result
add(
TestLocalhost, 'ip6-localhost',
skip=RESOLVER_DNSPYTHON, # XXX: Fix these.
skip_reason="Can return gaierror(-2)"
)
add(
TestLocalhost, 'localhost',
skip=greentest.RUNNING_ON_TRAVIS,
skip_reason="Can return gaierror(-2)"
)
class TestNonexistent(TestCase):
pass
add(TestNonexistent, 'nonexistentxxxyyy')
class Test1234(TestCase):
pass
add(Test1234, '1.2.3.4')
class Test127001(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
add(
Test127001, '10.28.141.171',
# skip=RESOLVER_DNSPYTHON,
# skip_reason="Beginning Dec 1 2017, ares started returning ip6-localhost "
# "instead of localhost"
)
class TestBroadcast(TestCase):
switch_expected = False
if RESOLVER_DNSPYTHON:
# dnspython raises errors for broadcasthost/255.255.255.255, but the system
# can resolve it.
        @unittest.skip('dnspython raises errors for broadcasthost/255.255.255.255')
def test__broadcast__gethostbyaddr(self):
return
test__broadcast__gethostbyname = test__broadcast__gethostbyaddr
add(TestBroadcast, '<broadcast>')
from gevent.resolver._hostsfile import HostsFile
class SanitizedHostsFile(HostsFile):
def iter_all_host_addr_pairs(self):
for name, addr in super(SanitizedHostsFile, self).iter_all_host_addr_pairs():
if (RESOLVER_NOT_SYSTEM
and (name.endswith('local') # ignore bonjour, ares can't find them
# ignore common aliases that ares can't find
or addr == '255.255.255.255'
or name == 'broadcasthost'
# We get extra results from some impls, like OS X
# it returns DGRAM results
or name == 'localhost')):
continue # pragma: no cover
if name.endswith('local'):
# These can only be found if bonjour is running,
# and are very slow to do so with the system resolver on OS X
continue
yield name, addr
@greentest.skipIf(greentest.RUNNING_ON_CI,
"This sometimes randomly fails on Travis with ares and on appveyor, beginning Feb 13, 2018")
# Probably due to round-robin DNS,
# since this is not actually the system's etc hosts file.
# TODO: Rethink this. We need something reliable. Go back to using
# the system's etc hosts?
class TestEtcHosts(TestCase):
MAX_HOSTS = int(os.getenv('GEVENTTEST_MAX_ETC_HOSTS', '10'))
@classmethod
def populate_tests(cls):
hf = SanitizedHostsFile(os.path.join(os.path.dirname(__file__),
'hosts_file.txt'))
all_etc_hosts = sorted(hf.iter_all_host_addr_pairs())
if len(all_etc_hosts) > cls.MAX_HOSTS and not RUN_ALL_HOST_TESTS:
all_etc_hosts = all_etc_hosts[:cls.MAX_HOSTS]
for host, ip in all_etc_hosts:
add(cls, host)
add(cls, ip)
TestEtcHosts.populate_tests()
class TestGeventOrg(TestCase):
# For this test to work correctly, it needs to resolve to
# an address with a single A record; round-robin DNS and multiple A records
# may mess it up (subsequent requests---and we always make two---may return
# unequal results). We used to use gevent.org, but that now has multiple A records;
# trying www.gevent.org which is a CNAME to readthedocs.org then worked, but it became
# an alias for python-gevent.readthedocs.org, which is an alias for readthedocs.io,
# and which also has multiple addresses. So we run the resolver twice to try to get
# the different answers, if needed.
HOSTNAME = 'www.gevent.org'
if RESOLVER_NOT_SYSTEM:
def _normalize_result_gethostbyname(self, result):
if result == '104.17.33.82':
result = '104.17.32.82'
return result
def _normalize_result_gethostbyname_ex(self, result):
result = super(TestGeventOrg, self)._normalize_result_gethostbyname_ex(result)
if result[0] == 'python-gevent.readthedocs.org':
result = ('readthedocs.io', ) + result[1:]
return result
def test_AI_CANONNAME(self):
        # Not all systems support AI_CANONNAME; notably the manylinux
# resolvers *sometimes* do not. Specifically, sometimes they
# provide the canonical name *only* on the first result.
args = (
# host
TestGeventOrg.HOSTNAME,
# port
None,
# family
socket.AF_INET,
# type
0,
# proto
0,
# flags
socket.AI_CANONNAME
)
gevent_result = gevent_socket.getaddrinfo(*args)
self.assertEqual(gevent_result[0][3], 'readthedocs.io')
real_result = socket.getaddrinfo(*args)
self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME = not all(r[3] for r in real_result)
try:
self.assertEqualResults(real_result, gevent_result, 'getaddrinfo')
finally:
del self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME
add(TestGeventOrg, TestGeventOrg.HOSTNAME)
class TestFamily(TestCase):
def test_inet(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_INET)
def test_unspec(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_UNSPEC)
def test_badvalue(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255000)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, -1)
@unittest.skipIf(RESOLVER_DNSPYTHON, "Raises the wrong errno")
def test_badtype(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, 'x')
class Test_getaddrinfo(TestCase):
def _test_getaddrinfo(self, *args):
self._test('getaddrinfo', *args)
def test_80(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 80)
def test_int_string(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, '80')
def test_0(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 0)
def test_http(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 'http')
def test_notexistent_tld(self):
self._test_getaddrinfo('myhost.mytld', 53)
def test_notexistent_dot_com(self):
self._test_getaddrinfo('sdfsdfgu5e66098032453245wfdggd.com', 80)
def test1(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 52, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0)
def test2(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 53, socket.AF_INET, socket.SOCK_DGRAM, 17)
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython only returns some of the possibilities")
def test3(self):
return self._test_getaddrinfo('google.com', 'http', socket.AF_INET6)
@greentest.skipIf(PY2, "Enums only on Python 3.4+")
def test_enums(self):
# https://github.com/gevent/gevent/issues/1310
# On Python 3, getaddrinfo does special things to make sure that
# the fancy enums are returned.
gai = gevent_socket.getaddrinfo('example.com', 80,
socket.AF_INET,
socket.SOCK_STREAM, socket.IPPROTO_TCP)
af, socktype, _proto, _canonname, _sa = gai[0]
self.assertIs(socktype, socket.SOCK_STREAM)
self.assertIs(af, socket.AF_INET)
class TestInternational(TestCase):
if PY2:
# We expect these to raise UnicodeEncodeError, which is a
# subclass of ValueError
REAL_ERRORS = set(TestCase.REAL_ERRORS) - {ValueError,}
if RESOLVER_ARES:
def test_russian_getaddrinfo_http(self):
            # And somehow, test_russian_getaddrinfo_http (``getaddrinfo(name, 'http')``)
# manages to work with recent versions of Python 2, but our preemptive encoding
# to ASCII causes it to fail with the c-ares resolver; but only that one test out of
# all of them.
self.skipTest("ares fails to encode.")
# dns python can actually resolve these: it uses
# the 2008 version of idna encoding, whereas on Python 2,
# with the default resolver, it tries to encode to ascii and
# raises a UnicodeEncodeError. So we get different results.
add(TestInternational, u'президент.рф', 'russian',
skip=(PY2 and RESOLVER_DNSPYTHON),
skip_reason="dnspython can actually resolve these")
add(TestInternational, u'президент.рф'.encode('idna'), 'idna')
@skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo")
class TestInterrupted_gethostbyname(gevent.testing.timing.AbstractGenericWaitTestCase):
# There are refs to a Waiter in the C code that don't go
# away yet; one gc may or may not do it.
@greentest.ignores_leakcheck
def test_returns_none_after_timeout(self):
super(TestInterrupted_gethostbyname, self).test_returns_none_after_timeout()
def wait(self, timeout):
with gevent.Timeout(timeout, False):
for index in xrange(1000000):
try:
gevent_socket.gethostbyname('www.x%s.com' % index)
except socket.error:
pass
raise AssertionError('Timeout was not raised')
def cleanup(self):
# Depending on timing, this can raise:
# (This suddenly started happening on Apr 6 2016; www.x1000000.com
# is apparently no longer around)
# File "test__socket_dns.py", line 538, in cleanup
# gevent.get_hub().threadpool.join()
# File "/home/travis/build/gevent/gevent/src/gevent/threadpool.py", line 108, in join
# sleep(delay)
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 169, in sleep
# hub.wait(loop.timer(seconds, ref=ref))
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 651, in wait
# result = waiter.get()
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 899, in get
# return self.hub.switch()
# File "/home/travis/build/gevent/gevent/src/greentest/greentest.py", line 520, in switch
# return _original_Hub.switch(self, *args)
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 630, in switch
# return RawGreenlet.switch(self)
# gaierror: [Errno -2] Name or service not known
try:
gevent.get_hub().threadpool.join()
except Exception: # pragma: no cover pylint:disable=broad-except
traceback.print_exc()
# class TestInterrupted_getaddrinfo(greentest.GenericWaitTestCase):
#
# def wait(self, timeout):
# with gevent.Timeout(timeout, False):
# for index in range(1000):
# try:
# gevent_socket.getaddrinfo('www.a%s.com' % index, 'http')
# except socket.gaierror:
# pass
class TestBadName(TestCase):
pass
add(TestBadName, 'xxxxxxxxxxxx')
class TestBadIP(TestCase):
pass
add(TestBadIP, '1.2.3.400')
@greentest.skipIf(greentest.RUNNING_ON_TRAVIS, "Travis began returning ip6-localhost")
class Test_getnameinfo_127001(TestCase):
def test(self):
self._test('getnameinfo', ('10.28.141.171', 80), 0)
def test_DGRAM(self):
self._test('getnameinfo', ('10.28.141.171', 779), 0)
self._test('getnameinfo', ('10.28.141.171', 779), socket.NI_DGRAM)
def test_NOFQDN(self):
# I get ('localhost', 'www') with _socket but ('localhost.localdomain', 'www') with gevent.socket
self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NOFQDN)
def test_NAMEREQD(self):
self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NAMEREQD)
class Test_getnameinfo_geventorg(TestCase):
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython raises an error when multiple results are returned")
def test_NUMERICHOST(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0)
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICHOST)
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython raises an error when multiple results are returned")
def test_NUMERICSERV(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICSERV)
def test_domain1(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0)
def test_domain2(self):
self._test('getnameinfo', ('www.gevent.org', 80), 0)
def test_port_zero(self):
self._test('getnameinfo', ('www.gevent.org', 0), 0)
class Test_getnameinfo_fail(TestCase):
def test_port_string(self):
self._test('getnameinfo', ('www.gevent.org', 'http'), 0)
def test_bad_flags(self):
self._test('getnameinfo', ('localhost', 80), 55555555)
class TestInvalidPort(TestCase):
@flaky.reraises_flaky_race_condition()
def test_overflow_neg_one(self):
        # On Appveyor beginning 2019-03-21, the system resolver
# sometimes returns ('23.100.69.251', '65535') instead of
# raising an error. That IP address belongs to
# readthedocs[.io?] which is where www.gevent.org is a CNAME
# to...but it doesn't actually *reverse* to readthedocs.io.
# Can't reproduce locally, not sure what's happening
self._test('getnameinfo', ('www.gevent.org', -1), 0)
# Beginning with PyPy 2.7 7.1 on Appveyor, we sometimes see this
# return an OverflowError instead of the TypeError about None
@greentest.skipOnLibuvOnPyPyOnWin("Errors dont match")
def test_typeerror_none(self):
self._test('getnameinfo', ('www.gevent.org', None), 0)
# Beginning with PyPy 2.7 7.1 on Appveyor, we sometimes see this
    # return a TypeError instead of the OverflowError.
# XXX: But see Test_getnameinfo_fail.test_port_string where this does work.
@greentest.skipOnLibuvOnPyPyOnWin("Errors don't match")
def test_typeerror_str(self):
self._test('getnameinfo', ('www.gevent.org', 'x'), 0)
def test_overflow_port_too_large(self):
self._test('getnameinfo', ('www.gevent.org', 65536), 0)
if __name__ == '__main__':
greentest.main()
| 37.865801 | 110 | 0.636933 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import gevent
from gevent import monkey
import os
import re
import unittest
import socket
from time import time
import traceback
import gevent.socket as gevent_socket
import gevent.testing as greentest
from gevent.testing import util
from gevent.testing.six import xrange
from gevent.testing import flaky
from gevent.testing.skipping import skipWithoutExternalNetwork
resolver = gevent.get_hub().resolver
util.debug('Resolver: %s', resolver)
if getattr(resolver, 'pool', None) is not None:
resolver.pool.size = 1
from gevent.testing.sysinfo import RESOLVER_NOT_SYSTEM
from gevent.testing.sysinfo import RESOLVER_DNSPYTHON
from gevent.testing.sysinfo import RESOLVER_ARES
from gevent.testing.sysinfo import PY2
from gevent.testing.sysinfo import PYPY
import gevent.testing.timing
assert gevent_socket.gaierror is socket.gaierror
assert gevent_socket.error is socket.error
RUN_ALL_HOST_TESTS = os.getenv('GEVENTTEST_RUN_ALL_ETC_HOST_TESTS', '')
def add(klass, hostname, name=None,
skip=None, skip_reason=None):
call = callable(hostname)
def _setattr(k, n, func):
if skip:
func = greentest.skipIf(skip, skip_reason,)(func)
if not hasattr(k, n):
setattr(k, n, func)
if name is None:
if call:
name = hostname.__name__
else:
name = re.sub(r'[^\w]+', '_', repr(hostname))
assert name, repr(hostname)
def test_getaddrinfo_http(self):
x = hostname() if call else hostname
self._test('getaddrinfo', x, 'http')
test_getaddrinfo_http.__name__ = 'test_%s_getaddrinfo_http' % name
_setattr(klass, test_getaddrinfo_http.__name__, test_getaddrinfo_http)
def test_gethostbyname(self):
x = hostname() if call else hostname
ipaddr = self._test('gethostbyname', x)
if not isinstance(ipaddr, Exception):
self._test('gethostbyaddr', ipaddr)
test_gethostbyname.__name__ = 'test_%s_gethostbyname' % name
_setattr(klass, test_gethostbyname.__name__, test_gethostbyname)
def test3(self):
x = hostname() if call else hostname
self._test('gethostbyname_ex', x)
test3.__name__ = 'test_%s_gethostbyname_ex' % name
_setattr(klass, test3.__name__, test3)
def test4(self):
x = hostname() if call else hostname
self._test('gethostbyaddr', x)
test4.__name__ = 'test_%s_gethostbyaddr' % name
_setattr(klass, test4.__name__, test4)
def test5(self):
x = hostname() if call else hostname
self._test('getnameinfo', (x, 80), 0)
test5.__name__ = 'test_%s_getnameinfo' % name
_setattr(klass, test5.__name__, test5)
@skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo")
class TestCase(greentest.TestCase):
maxDiff = None
__timeout__ = 30
switch_expected = None
TRACE = not util.QUIET and os.getenv('GEVENT_DEBUG', '') == 'trace'
verbose_dns = TRACE
def trace(self, message, *args, **kwargs):
if self.TRACE:
util.debug(message, *args, **kwargs)
REAL_ERRORS = (AttributeError, ValueError, NameError)
def __run_resolver(self, function, args):
try:
result = function(*args)
assert not isinstance(result, BaseException), repr(result)
return result
except self.REAL_ERRORS:
raise
        except Exception as ex:
            if self.TRACE:
traceback.print_exc()
return ex
def __trace_call(self, result, runtime, function, *args):
util.debug(self.__format_call(function, args))
self.__trace_fresult(result, runtime)
def __format_call(self, function, args):
args = repr(args)
if args.endswith(',)'):
args = args[:-2] + ')'
try:
module = function.__module__.replace('gevent._socketcommon', 'gevent')
name = function.__name__
return '%s:%s%s' % (module, name, args)
except AttributeError:
return function + args
def __trace_fresult(self, result, seconds):
if isinstance(result, Exception):
msg = ' -=> raised %r' % (result, )
else:
msg = ' -=> returned %r' % (result, )
time_ms = ' %.2fms' % (seconds * 1000.0, )
space = 80 - len(msg) - len(time_ms)
if space > 0:
space = ' ' * space
else:
space = ''
util.debug(msg + space + time_ms)
if not TRACE:
def run_resolver(self, function, func_args):
now = time()
return self.__run_resolver(function, func_args), time() - now
else:
def run_resolver(self, function, func_args):
self.trace(self.__format_call(function, func_args))
delta = time()
result = self.__run_resolver(function, func_args)
delta = time() - delta
self.__trace_fresult(result, delta)
return result, delta
def setUp(self):
super(TestCase, self).setUp()
if not self.verbose_dns:
gevent.get_hub().exception_stream = None
def tearDown(self):
if not self.verbose_dns:
try:
del gevent.get_hub().exception_stream
except AttributeError:
                pass
        super(TestCase, self).tearDown()
def should_log_results(self, result1, result2):
if not self.verbose_dns:
return False
if isinstance(result1, BaseException) and isinstance(result2, BaseException):
return type(result1) is not type(result2)
return repr(result1) != repr(result2)
def _test(self, func_name, *args):
gevent_func = getattr(gevent_socket, func_name)
real_func = monkey.get_original('socket', func_name)
tester = getattr(self, '_run_test_' + func_name, self._run_test_generic)
result = tester(func_name, real_func, gevent_func, args)
_real_result, time_real, gevent_result, time_gevent = result
if self.verbose_dns and time_gevent > time_real + 0.02 and time_gevent > 0.03:
msg = 'gevent:%s%s took %dms versus %dms stdlib' % (
func_name, args, time_gevent * 1000.0, time_real * 1000.0)
if time_gevent > time_real + 1:
word = 'VERY'
else:
word = 'quite'
util.log('\nWARNING: %s slow: %s', word, msg, color='warning')
return gevent_result
def _run_test_generic(self, func_name, real_func, gevent_func, func_args):
real_result, time_real = self.run_resolver(real_func, func_args)
gevent_result, time_gevent = self.run_resolver(gevent_func, func_args)
if util.QUIET and self.should_log_results(real_result, gevent_result):
util.log('')
self.__trace_call(real_result, time_real, real_func, func_args)
self.__trace_call(gevent_result, time_gevent, gevent_func, func_args)
self.assertEqualResults(real_result, gevent_result, func_name)
return real_result, time_real, gevent_result, time_gevent
def _normalize_result(self, result, func_name):
norm_name = '_normalize_result_' + func_name
if hasattr(self, norm_name):
return getattr(self, norm_name)(result)
return result
    NORMALIZE_GAI_IGNORE_CANONICAL_NAME = RESOLVER_ARES
    if not RESOLVER_NOT_SYSTEM:
def _normalize_result_getaddrinfo(self, result):
return result
def _normalize_result_gethostbyname_ex(self, result):
return result
else:
def _normalize_result_gethostbyname_ex(self, result):
# or using the native and ares resolvers due to load-balancing techniques.
# We sort them.
if isinstance(result, BaseException):
return result
# result[1].sort() # we wind up discarding this
            # On Py2 in test_russian_gethostbyname_ex, this
            # is actually an integer, for some reason. In TestLocalhost.test__ip6_localhost,
# the result isn't this long (maybe an error?).
try:
result[2].sort()
except AttributeError:
pass
except IndexError:
return result
ips = result[2]
if ips == ['10.28.141.171', '10.28.141.171']:
ips = ['10.28.141.171']
return (result[0].lower(), [], ips)
def _normalize_result_getaddrinfo(self, result):
if isinstance(result, BaseException):
return result
# Likewise, on certain Linux systems, even on Python 2, IPPROTO_SCTP (132)
# results may be returned --- but that may not even have a constant in the
# socket module! So to be safe, we strip out anything that's not
if isinstance(result, list):
result = [
x
for x in result
if x[1] in (socket.SOCK_STREAM, socket.SOCK_DGRAM)
and x[2] in (socket.IPPROTO_TCP, socket.IPPROTO_UDP)
]
if self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME:
result = [
(family, kind, proto, '', addr)
for family, kind, proto, _, addr
in result
]
if isinstance(result, list):
result.sort()
return result
def _normalize_result_getnameinfo(self, result):
return result
NORMALIZE_GHBA_IGNORE_ALIAS = False
def _normalize_result_gethostbyaddr(self, result):
if not RESOLVER_NOT_SYSTEM:
return result
if self.NORMALIZE_GHBA_IGNORE_ALIAS and isinstance(result, tuple):
return (result[0], [], result[2])
return result
def _compare_exceptions_strict(self, real_result, gevent_result, func_name):
if repr(real_result) == repr(gevent_result):
return
msg = (func_name, 'system:', repr(real_result), 'gevent:', repr(gevent_result))
self.assertIs(type(gevent_result), type(real_result), msg)
if isinstance(real_result, TypeError):
return
if PYPY and isinstance(real_result, socket.herror):
# it just puts a string like 'host lookup failed: <thehost>';
# it must be doing that manually.
return
self.assertEqual(real_result.args, gevent_result.args, msg)
if hasattr(real_result, 'errno'):
self.assertEqual(real_result.errno, gevent_result.errno)
def _compare_exceptions_lenient(self, real_result, gevent_result, func_name):
try:
self._compare_exceptions_strict(real_result, gevent_result, func_name)
except AssertionError:
# Allow raising different things in a few rare cases.
if (
func_name not in (
'getaddrinfo',
'gethostbyaddr',
'gethostbyname',
'gethostbyname_ex',
'getnameinfo',
)
or type(real_result) not in (socket.herror, socket.gaierror)
or type(gevent_result) not in (socket.herror, socket.gaierror, socket.error)
):
raise
util.log('WARNING: error type mismatch for %s: %r (gevent) != %r (stdlib)',
func_name,
gevent_result, real_result,
color='warning')
_compare_exceptions = _compare_exceptions_lenient if RESOLVER_NOT_SYSTEM else _compare_exceptions_strict
def _compare_results(self, real_result, gevent_result, func_name):
if real_result == gevent_result:
return True
compare_func = getattr(self, '_compare_results_' + func_name,
self._generic_compare_results)
return compare_func(real_result, gevent_result, func_name)
def _generic_compare_results(self, real_result, gevent_result, func_name):
try:
if len(real_result) != len(gevent_result):
return False
except TypeError:
return False
return all(self._compare_results(x, y, func_name)
for (x, y)
in zip(real_result, gevent_result))
def _compare_results_getaddrinfo(self, real_result, gevent_result, func_name):
# On some systems, we find more results with
# one resolver than we do with the other resolver.
# So as long as they have some subset in common,
# we'll take it.
if not set(real_result).isdisjoint(set(gevent_result)):
return True
return self._generic_compare_results(real_result, gevent_result, func_name)
def _compare_address_strings(self, a, b):
a_segments = a.count(':')
b_segments = b.count(':')
if a_segments and b_segments:
if a_segments == b_segments and a_segments in (4, 5, 6, 7):
return True
if a.rstrip(':').startswith(b.rstrip(':')) or b.rstrip(':').startswith(a.rstrip(':')):
return True
if a_segments >= 2 and b_segments >= 2 and a.split(':')[:2] == b.split(':')[:2]:
return True
return a.split('.', 1)[-1] == b.split('.', 1)[-1]
def _compare_results_gethostbyname(self, real_result, gevent_result, _func_name):
return self._compare_address_strings(real_result, gevent_result)
def _compare_results_gethostbyname_ex(self, real_result, gevent_result, _func_name):
return not set(real_result[2]).isdisjoint(set(gevent_result[2]))
def assertEqualResults(self, real_result, gevent_result, func_name):
errors = (
OverflowError,
TypeError,
UnicodeError,
socket.error,
socket.gaierror,
socket.herror,
)
if isinstance(real_result, errors) and isinstance(gevent_result, errors):
self._compare_exceptions(real_result, gevent_result, func_name)
return
real_result = self._normalize_result(real_result, func_name)
gevent_result = self._normalize_result(gevent_result, func_name)
if self._compare_results(real_result, gevent_result, func_name):
return
# If we're using a different resolver, allow the real resolver to generate an
if (
RESOLVER_NOT_SYSTEM
and isinstance(real_result, errors)
and not isinstance(gevent_result, errors)
):
return
# they refuse to pick a name and raise ``socket.error``
if (
RESOLVER_NOT_SYSTEM
and PYPY
and func_name == 'getnameinfo'
and isinstance(gevent_result, socket.error)
and not isinstance(real_result, socket.error)
):
return
# From 2.7 on, assertEqual does a better job highlighting the results than we would
# because it calls assertSequenceEqual, which highlights the exact
# difference in the tuple
self.assertEqual(real_result, gevent_result)
class TestTypeError(TestCase):
pass
add(TestTypeError, None)
add(TestTypeError, 25)
class TestHostname(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
def __normalize_name(self, result):
if (RESOLVER_ARES or RESOLVER_DNSPYTHON) and isinstance(result, tuple):
# The system resolver can return the FQDN, in the first result,
# when given certain configurations. But c-ares and dnspython
# do not.
name = result[0]
name = name.split('.', 1)[0]
result = (name,) + result[1:]
return result
def _normalize_result_gethostbyaddr(self, result):
result = TestCase._normalize_result_gethostbyaddr(self, result)
return self.__normalize_name(result)
def _normalize_result_getnameinfo(self, result):
result = TestCase._normalize_result_getnameinfo(self, result)
if PY2:
# Not sure why we only saw this on Python 2
result = self.__normalize_name(result)
return result
add(
TestHostname,
socket.gethostname,
skip=greentest.RUNNING_ON_TRAVIS and greentest.RESOLVER_NOT_SYSTEM,
skip_reason=("Sometimes get a different result for getaddrinfo "
"with dnspython; c-ares produces different results for "
"localhost on Travis beginning Sept 2019")
)
class TestLocalhost(TestCase):
# certain tests in test_patched_socket.py only work if getaddrinfo('localhost') does not switch
# (e.g. NetworkConnectionAttributesTest.testSourceAddress)
#switch_expected = False
# XXX: The above has been commented out for some time. Apparently this isn't the case
def _normalize_result_getaddrinfo(self, result):
if RESOLVER_NOT_SYSTEM:
return ()
return super(TestLocalhost, self)._normalize_result_getaddrinfo(result)
NORMALIZE_GHBA_IGNORE_ALIAS = True
if greentest.RUNNING_ON_TRAVIS and greentest.PY2 and RESOLVER_NOT_SYSTEM:
def _normalize_result_gethostbyaddr(self, result):
result = super(TestLocalhost, self)._normalize_result_gethostbyaddr(result)
if isinstance(result, tuple):
result = (result[0], result[1], ['10.28.141.171'])
return result
add(
TestLocalhost, 'ip6-localhost',
skip=RESOLVER_DNSPYTHON, skip_reason="Can return gaierror(-2)"
)
add(
TestLocalhost, 'localhost',
skip=greentest.RUNNING_ON_TRAVIS,
skip_reason="Can return gaierror(-2)"
)
class TestNonexistent(TestCase):
pass
add(TestNonexistent, 'nonexistentxxxyyy')
class Test1234(TestCase):
pass
add(Test1234, '1.2.3.4')
class Test127001(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
add(
Test127001, '10.28.141.171',
)
class TestBroadcast(TestCase):
switch_expected = False
if RESOLVER_DNSPYTHON:
        @unittest.skip('dnspython raises errors for broadcasthost/255.255.255.255')
def test__broadcast__gethostbyaddr(self):
return
test__broadcast__gethostbyname = test__broadcast__gethostbyaddr
add(TestBroadcast, '<broadcast>')
from gevent.resolver._hostsfile import HostsFile
class SanitizedHostsFile(HostsFile):
def iter_all_host_addr_pairs(self):
for name, addr in super(SanitizedHostsFile, self).iter_all_host_addr_pairs():
if (RESOLVER_NOT_SYSTEM
and (name.endswith('local') # ignore common aliases that ares can't find
or addr == '255.255.255.255'
or name == 'broadcasthost'
or name == 'localhost')):
                continue
            if name.endswith('local'):
continue
yield name, addr
@greentest.skipIf(greentest.RUNNING_ON_CI,
"This sometimes randomly fails on Travis with ares and on appveyor, beginning Feb 13, 2018")
# TODO: Rethink this. We need something reliable. Go back to using
# the system's etc hosts?
class TestEtcHosts(TestCase):
MAX_HOSTS = int(os.getenv('GEVENTTEST_MAX_ETC_HOSTS', '10'))
@classmethod
def populate_tests(cls):
hf = SanitizedHostsFile(os.path.join(os.path.dirname(__file__),
'hosts_file.txt'))
all_etc_hosts = sorted(hf.iter_all_host_addr_pairs())
if len(all_etc_hosts) > cls.MAX_HOSTS and not RUN_ALL_HOST_TESTS:
all_etc_hosts = all_etc_hosts[:cls.MAX_HOSTS]
for host, ip in all_etc_hosts:
add(cls, host)
add(cls, ip)
TestEtcHosts.populate_tests()
class TestGeventOrg(TestCase):
HOSTNAME = 'www.gevent.org'
if RESOLVER_NOT_SYSTEM:
def _normalize_result_gethostbyname(self, result):
if result == '104.17.33.82':
result = '104.17.32.82'
return result
def _normalize_result_gethostbyname_ex(self, result):
result = super(TestGeventOrg, self)._normalize_result_gethostbyname_ex(result)
if result[0] == 'python-gevent.readthedocs.org':
result = ('readthedocs.io', ) + result[1:]
return result
def test_AI_CANONNAME(self):
args = (
TestGeventOrg.HOSTNAME,
None,
socket.AF_INET,
0,
0,
socket.AI_CANONNAME
)
gevent_result = gevent_socket.getaddrinfo(*args)
self.assertEqual(gevent_result[0][3], 'readthedocs.io')
real_result = socket.getaddrinfo(*args)
self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME = not all(r[3] for r in real_result)
try:
self.assertEqualResults(real_result, gevent_result, 'getaddrinfo')
finally:
del self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME
add(TestGeventOrg, TestGeventOrg.HOSTNAME)
class TestFamily(TestCase):
def test_inet(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_INET)
def test_unspec(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_UNSPEC)
def test_badvalue(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255000)
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, -1)
@unittest.skipIf(RESOLVER_DNSPYTHON, "Raises the wrong errno")
def test_badtype(self):
self._test('getaddrinfo', TestGeventOrg.HOSTNAME, 'x')
class Test_getaddrinfo(TestCase):
def _test_getaddrinfo(self, *args):
self._test('getaddrinfo', *args)
def test_80(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 80)
def test_int_string(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, '80')
def test_0(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 0)
def test_http(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 'http')
def test_notexistent_tld(self):
self._test_getaddrinfo('myhost.mytld', 53)
def test_notexistent_dot_com(self):
self._test_getaddrinfo('sdfsdfgu5e66098032453245wfdggd.com', 80)
def test1(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 52, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0)
def test2(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 53, socket.AF_INET, socket.SOCK_DGRAM, 17)
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython only returns some of the possibilities")
def test3(self):
return self._test_getaddrinfo('google.com', 'http', socket.AF_INET6)
@greentest.skipIf(PY2, "Enums only on Python 3.4+")
def test_enums(self):
gai = gevent_socket.getaddrinfo('example.com', 80,
socket.AF_INET,
socket.SOCK_STREAM, socket.IPPROTO_TCP)
af, socktype, _proto, _canonname, _sa = gai[0]
self.assertIs(socktype, socket.SOCK_STREAM)
self.assertIs(af, socket.AF_INET)
class TestInternational(TestCase):
if PY2:
REAL_ERRORS = set(TestCase.REAL_ERRORS) - {ValueError,}
if RESOLVER_ARES:
def test_russian_getaddrinfo_http(self):
self.skipTest("ares fails to encode.")
add(TestInternational, u'президент.рф', 'russian',
skip=(PY2 and RESOLVER_DNSPYTHON),
skip_reason="dnspython can actually resolve these")
add(TestInternational, u'президент.рф'.encode('idna'), 'idna')
@skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo")
class TestInterrupted_gethostbyname(gevent.testing.timing.AbstractGenericWaitTestCase):
# away yet; one gc may or may not do it.
@greentest.ignores_leakcheck
def test_returns_none_after_timeout(self):
super(TestInterrupted_gethostbyname, self).test_returns_none_after_timeout()
def wait(self, timeout):
with gevent.Timeout(timeout, False):
for index in xrange(1000000):
try:
gevent_socket.gethostbyname('www.x%s.com' % index)
except socket.error:
pass
raise AssertionError('Timeout was not raised')
def cleanup(self):
# Depending on timing, this can raise:
# (This suddenly started happening on Apr 6 2016; www.x1000000.com
# is apparently no longer around)
# File "test__socket_dns.py", line 538, in cleanup
# gevent.get_hub().threadpool.join()
# File "/home/travis/build/gevent/gevent/src/gevent/threadpool.py", line 108, in join
# sleep(delay)
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 169, in sleep
# hub.wait(loop.timer(seconds, ref=ref))
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 651, in wait
# result = waiter.get()
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 899, in get
# return self.hub.switch()
# File "/home/travis/build/gevent/gevent/src/greentest/greentest.py", line 520, in switch
# return _original_Hub.switch(self, *args)
# File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 630, in switch
# return RawGreenlet.switch(self)
# gaierror: [Errno -2] Name or service not known
try:
gevent.get_hub().threadpool.join()
except Exception: # pragma: no cover pylint:disable=broad-except
traceback.print_exc()
# class TestInterrupted_getaddrinfo(greentest.GenericWaitTestCase):
#
# def wait(self, timeout):
# with gevent.Timeout(timeout, False):
# for index in range(1000):
# try:
# gevent_socket.getaddrinfo('www.a%s.com' % index, 'http')
# except socket.gaierror:
# pass
class TestBadName(TestCase):
pass
add(TestBadName, 'xxxxxxxxxxxx')
class TestBadIP(TestCase):
pass
add(TestBadIP, '1.2.3.400')
@greentest.skipIf(greentest.RUNNING_ON_TRAVIS, "Travis began returning ip6-localhost")
class Test_getnameinfo_127001(TestCase):
def test(self):
self._test('getnameinfo', ('10.28.141.171', 80), 0)
def test_DGRAM(self):
self._test('getnameinfo', ('10.28.141.171', 779), 0)
self._test('getnameinfo', ('10.28.141.171', 779), socket.NI_DGRAM)
def test_NOFQDN(self):
# I get ('localhost', 'www') with _socket but ('localhost.localdomain', 'www') with gevent.socket
self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NOFQDN)
def test_NAMEREQD(self):
self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NAMEREQD)
class Test_getnameinfo_geventorg(TestCase):
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython raises an error when multiple results are returned")
def test_NUMERICHOST(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0)
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICHOST)
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython raises an error when multiple results are returned")
def test_NUMERICSERV(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICSERV)
def test_domain1(self):
self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0)
def test_domain2(self):
self._test('getnameinfo', ('www.gevent.org', 80), 0)
def test_port_zero(self):
self._test('getnameinfo', ('www.gevent.org', 0), 0)
class Test_getnameinfo_fail(TestCase):
def test_port_string(self):
self._test('getnameinfo', ('www.gevent.org', 'http'), 0)
def test_bad_flags(self):
self._test('getnameinfo', ('localhost', 80), 55555555)
class TestInvalidPort(TestCase):
@flaky.reraises_flaky_race_condition()
def test_overflow_neg_one(self):
        # On Appveyor beginning 2019-03-21, the system resolver
# sometimes returns ('23.100.69.251', '65535') instead of
# raising an error. That IP address belongs to
# readthedocs[.io?] which is where www.gevent.org is a CNAME
# to...but it doesn't actually *reverse* to readthedocs.io.
self._test('getnameinfo', ('www.gevent.org', -1), 0)
@greentest.skipOnLibuvOnPyPyOnWin("Errors dont match")
def test_typeerror_none(self):
self._test('getnameinfo', ('www.gevent.org', None), 0)
@greentest.skipOnLibuvOnPyPyOnWin("Errors don't match")
def test_typeerror_str(self):
self._test('getnameinfo', ('www.gevent.org', 'x'), 0)
def test_overflow_port_too_large(self):
self._test('getnameinfo', ('www.gevent.org', 65536), 0)
if __name__ == '__main__':
greentest.main()
| true | true |
1c49e25c6e75fd9b0b06b279779a858490d11a7e | 9,520 | py | Python | nova/api/openstack/placement/microversion.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 1 | 2015-11-30T19:44:00.000Z | 2015-11-30T19:44:00.000Z | nova/api/openstack/placement/microversion.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 5 | 2018-04-12T16:44:34.000Z | 2018-05-08T13:33:05.000Z | nova/api/openstack/placement/microversion.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 3 | 2018-04-04T15:15:01.000Z | 2018-04-19T18:14:25.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Microversion handling."""
# NOTE(cdent): This code is taken from enamel:
# https://github.com/jaypipes/enamel and was the original source of
# the code now used in microversion_parse library.
import collections
import inspect
import microversion_parse
import webob
# NOTE(cdent): avoid cyclical import conflict between util and
# microversion
import nova.api.openstack.placement.util
from nova.i18n import _
SERVICE_TYPE = 'placement'
MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE
VERSIONED_METHODS = collections.defaultdict(list)
# The Canonical Version List
VERSIONS = [
'1.0',
'1.1', # initial support for aggregate.get_aggregates and set_aggregates
'1.2', # Adds /resource_classes resource endpoint
'1.3', # Adds 'member_of' query parameter to get resource providers
# that are members of any of the listed aggregates
'1.4', # Adds resources query string parameter in GET /resource_providers
'1.5', # Adds DELETE /resource_providers/{uuid}/inventories
]
def max_version_string():
return VERSIONS[-1]
def min_version_string():
return VERSIONS[0]
def parse_version_string(version_string):
"""Turn a version string into a Version
:param version_string: A string of two numerals, X.Y, or 'latest'
:returns: a Version
:raises: TypeError
"""
if version_string == 'latest':
version_string = max_version_string()
try:
# The combination of int and a limited split with the
# named tuple means that this incantation will raise
# ValueError or TypeError when the incoming data is
# poorly formed but will, however, naturally adapt to
# extraneous whitespace.
return Version(*(int(value) for value
in version_string.split('.', 1)))
except (ValueError, TypeError) as exc:
raise TypeError('invalid version string: %s; %s' % (
version_string, exc))
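# Illustrative doctest-style usage (not exercised anywhere in this module):
#   >>> parse_version_string('1.4')
#   Version(major=1, minor=4)
#   >>> str(parse_version_string('latest')) == max_version_string()
#   True
#   >>> parse_version_string('banana')
#   Traceback (most recent call last):
#   TypeError: invalid version string: banana; ...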
def raise_http_status_code_if_not_version(req, status_code, min_version,
max_version=None):
"""Utility to raise a http status code if the wanted microversion does not
match.
:param req: The HTTP request for the placement api
:param status_code: HTTP status code (integer value) to be raised
:param min_version: Minimum placement microversion level
:param max_version: Maximum placement microversion level
:returns: None
:raises: HTTP status code if the specified microversion does not match
:raises: KeyError if status_code is not a valid HTTP status code
"""
if not isinstance(min_version, tuple):
min_version = parse_version_string(min_version)
if max_version and not isinstance(max_version, tuple):
max_version = parse_version_string(max_version)
want_version = req.environ[MICROVERSION_ENVIRON]
if not want_version.matches(min_version, max_version):
raise webob.exc.status_map[status_code]
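# Example (hypothetical handler usage): to return 404 for requests that
# negotiated a microversion below 1.5:
#   raise_http_status_code_if_not_version(req, 404, (1, 5))
# webob.exc.status_map[404] is HTTPNotFound, so that is what gets raised.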
class MicroversionMiddleware(object):
"""WSGI middleware for getting microversion info."""
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
util = nova.api.openstack.placement.util
try:
microversion = extract_version(req.headers)
except ValueError as exc:
raise webob.exc.HTTPNotAcceptable(
_('Invalid microversion: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
except TypeError as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid microversion: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
req.environ[MICROVERSION_ENVIRON] = microversion
microversion_header = '%s %s' % (SERVICE_TYPE, microversion)
try:
response = req.get_response(self.application)
except webob.exc.HTTPError as exc:
# If there was an error in the application we still need
# to send the microversion header, so add the header and
# re-raise the exception.
exc.headers.add(Version.HEADER, microversion_header)
raise exc
response.headers.add(Version.HEADER, microversion_header)
response.headers.add('vary', Version.HEADER)
return response
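# Illustrative wiring of the middleware (the wrapped application name is
# hypothetical):
#
#     app = MicroversionMiddleware(placement_wsgi_app)
#
# Every request handled by ``app`` then carries the negotiated microversion in
# req.environ['placement.microversion'], and the response echoes it in the
# OpenStack-API-Version header.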
class Version(collections.namedtuple('Version', 'major minor')):
"""A namedtuple containing major and minor values.
    Since it is a tuple, it is automatically comparable.
"""
HEADER = 'OpenStack-API-Version'
MIN_VERSION = None
MAX_VERSION = None
def __str__(self):
return '%s.%s' % (self.major, self.minor)
@property
def max_version(self):
if not self.MAX_VERSION:
self.MAX_VERSION = parse_version_string(max_version_string())
return self.MAX_VERSION
@property
def min_version(self):
if not self.MIN_VERSION:
self.MIN_VERSION = parse_version_string(min_version_string())
return self.MIN_VERSION
def matches(self, min_version=None, max_version=None):
if min_version is None:
min_version = self.min_version
if max_version is None:
max_version = self.max_version
return min_version <= self <= max_version
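# Illustrative Version semantics (values assume the VERSIONS list above):
#
#     v = parse_version_string('1.3')        # Version(major=1, minor=3)
#     v.matches()                            # True: 1.0 <= 1.3 <= 1.5
#     v.matches(min_version=Version(1, 4))   # False: below the minimum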
def extract_version(headers):
"""Extract the microversion from Version.HEADER
There may be multiple headers and some which don't match our
service.
"""
found_version = microversion_parse.get_version(headers,
service_type=SERVICE_TYPE)
version_string = found_version or min_version_string()
request_version = parse_version_string(version_string)
    # We need a version that is in VERSIONS and within MIN and MAX.
# This gives us the option to administratively disable a
# version if we really need to.
if (str(request_version) in VERSIONS and request_version.matches()):
return request_version
raise ValueError('Unacceptable version header: %s' % version_string)
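# Illustrative behaviour (header parsing is delegated to microversion_parse):
#
#     extract_version({'OpenStack-API-Version': 'placement 1.2'})  -> Version(1, 2)
#     extract_version({})                                          -> Version(1, 0)
#     extract_version({'OpenStack-API-Version': 'placement 9.9'})  -> ValueError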
# From twisted
# https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py
def _fully_qualified_name(obj):
"""Return the fully qualified name of a module, class, method or function.
Classes and functions need to be module level ones to be correctly
qualified.
"""
try:
name = obj.__qualname__
except AttributeError:
name = obj.__name__
if inspect.isclass(obj) or inspect.isfunction(obj):
moduleName = obj.__module__
return "%s.%s" % (moduleName, name)
elif inspect.ismethod(obj):
try:
cls = obj.im_class
except AttributeError:
# Python 3 eliminates im_class, substitutes __module__ and
# __qualname__ to provide similar information.
return "%s.%s" % (obj.__module__, obj.__qualname__)
else:
className = _fully_qualified_name(cls)
return "%s.%s" % (className, name)
return name
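# Illustrative results (the exact module path depends on where this file lives,
# so it is shown as <module>):
#
#     _fully_qualified_name(Version)         -> '<module>.Version'
#     _fully_qualified_name(Version.matches) -> '<module>.Version.matches'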
def _find_method(f, version):
"""Look in VERSIONED_METHODS for method with right name matching version.
If no match is found raise a 404.
"""
qualified_name = _fully_qualified_name(f)
# A KeyError shouldn't be possible here, but let's be robust
# just in case.
method_list = VERSIONED_METHODS.get(qualified_name, [])
for min_version, max_version, func in method_list:
if min_version <= version <= max_version:
return func
raise webob.exc.HTTPNotFound()
def version_handler(min_ver, max_ver=None):
"""Decorator for versioning API methods.
Add as a decorator to a placement API handler to constrain
the microversions at which it will run. Add after the
``wsgify`` decorator.
This does not check for version intersections. That's the
domain of tests.
:param min_ver: A string of two numerals, X.Y indicating the
minimum version allowed for the decorated method.
    :param max_ver: A string of two numerals, X.Y, indicating the
maximum version allowed for the decorated method.
"""
def decorator(f):
min_version = parse_version_string(min_ver)
if max_ver:
max_version = parse_version_string(max_ver)
else:
max_version = parse_version_string(max_version_string())
qualified_name = _fully_qualified_name(f)
VERSIONED_METHODS[qualified_name].append(
(min_version, max_version, f))
def decorated_func(req, *args, **kwargs):
version = req.environ[MICROVERSION_ENVIRON]
return _find_method(f, version)(req, *args, **kwargs)
# Sort highest min version to beginning of list.
VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0],
reverse=True)
return decorated_func
return decorator
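# Illustrative usage of version_handler (handler name and version bounds are
# hypothetical):
#
#     @webob.dec.wsgify
#     @version_handler('1.2', '1.4')
#     def list_resource_classes(req):
#         ...
#
# A request whose negotiated microversion falls outside 1.2-1.4 is dispatched
# by _find_method to another registration of the same qualified name, or
# results in a 404.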
| 35.522388 | 78 | 0.67542 |
import collections
import inspect
import microversion_parse
import webob
import nova.api.openstack.placement.util
from nova.i18n import _
SERVICE_TYPE = 'placement'
MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE
VERSIONED_METHODS = collections.defaultdict(list)
VERSIONS = [
'1.0',
'1.1', '1.2', '1.3', '1.4', '1.5', ]
def max_version_string():
return VERSIONS[-1]
def min_version_string():
return VERSIONS[0]
def parse_version_string(version_string):
if version_string == 'latest':
version_string = max_version_string()
try:
return Version(*(int(value) for value
in version_string.split('.', 1)))
except (ValueError, TypeError) as exc:
raise TypeError('invalid version string: %s; %s' % (
version_string, exc))
def raise_http_status_code_if_not_version(req, status_code, min_version,
max_version=None):
if not isinstance(min_version, tuple):
min_version = parse_version_string(min_version)
if max_version and not isinstance(max_version, tuple):
max_version = parse_version_string(max_version)
want_version = req.environ[MICROVERSION_ENVIRON]
if not want_version.matches(min_version, max_version):
raise webob.exc.status_map[status_code]
class MicroversionMiddleware(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
util = nova.api.openstack.placement.util
try:
microversion = extract_version(req.headers)
except ValueError as exc:
raise webob.exc.HTTPNotAcceptable(
_('Invalid microversion: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
except TypeError as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid microversion: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
req.environ[MICROVERSION_ENVIRON] = microversion
microversion_header = '%s %s' % (SERVICE_TYPE, microversion)
try:
response = req.get_response(self.application)
except webob.exc.HTTPError as exc:
exc.headers.add(Version.HEADER, microversion_header)
raise exc
response.headers.add(Version.HEADER, microversion_header)
response.headers.add('vary', Version.HEADER)
return response
class Version(collections.namedtuple('Version', 'major minor')):
HEADER = 'OpenStack-API-Version'
MIN_VERSION = None
MAX_VERSION = None
def __str__(self):
return '%s.%s' % (self.major, self.minor)
@property
def max_version(self):
if not self.MAX_VERSION:
self.MAX_VERSION = parse_version_string(max_version_string())
return self.MAX_VERSION
@property
def min_version(self):
if not self.MIN_VERSION:
self.MIN_VERSION = parse_version_string(min_version_string())
return self.MIN_VERSION
def matches(self, min_version=None, max_version=None):
if min_version is None:
min_version = self.min_version
if max_version is None:
max_version = self.max_version
return min_version <= self <= max_version
def extract_version(headers):
found_version = microversion_parse.get_version(headers,
service_type=SERVICE_TYPE)
version_string = found_version or min_version_string()
request_version = parse_version_string(version_string)
if (str(request_version) in VERSIONS and request_version.matches()):
return request_version
raise ValueError('Unacceptable version header: %s' % version_string)
def _fully_qualified_name(obj):
try:
name = obj.__qualname__
except AttributeError:
name = obj.__name__
if inspect.isclass(obj) or inspect.isfunction(obj):
moduleName = obj.__module__
return "%s.%s" % (moduleName, name)
elif inspect.ismethod(obj):
try:
cls = obj.im_class
except AttributeError:
return "%s.%s" % (obj.__module__, obj.__qualname__)
else:
className = _fully_qualified_name(cls)
return "%s.%s" % (className, name)
return name
def _find_method(f, version):
qualified_name = _fully_qualified_name(f)
method_list = VERSIONED_METHODS.get(qualified_name, [])
for min_version, max_version, func in method_list:
if min_version <= version <= max_version:
return func
raise webob.exc.HTTPNotFound()
def version_handler(min_ver, max_ver=None):
def decorator(f):
min_version = parse_version_string(min_ver)
if max_ver:
max_version = parse_version_string(max_ver)
else:
max_version = parse_version_string(max_version_string())
qualified_name = _fully_qualified_name(f)
VERSIONED_METHODS[qualified_name].append(
(min_version, max_version, f))
def decorated_func(req, *args, **kwargs):
version = req.environ[MICROVERSION_ENVIRON]
return _find_method(f, version)(req, *args, **kwargs)
VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0],
reverse=True)
return decorated_func
return decorator
| true | true |
1c49e3447e6cad31e2cefda415baed1335d3fa12 | 16213 | py | Python | homeassistant/const.py | 84KaliPleXon3/home-assistant-core | 7194b74580535395b5f100de98643e029bd0f1b6 | [
"Apache-2.0"
] | 2 | 2021-09-13T21:44:02.000Z | 2021-12-17T21:20:51.000Z | homeassistant/const.py | 84KaliPleXon3/home-assistant-core | 7194b74580535395b5f100de98643e029bd0f1b6 | [
"Apache-2.0"
] | 4 | 2021-02-08T20:47:39.000Z | 2022-03-12T00:33:22.000Z | homeassistant/const.py | 84KaliPleXon3/home-assistant-core | 7194b74580535395b5f100de98643e029bd0f1b6 | [
"Apache-2.0"
] | 2 | 2020-11-04T07:40:01.000Z | 2021-09-13T21:44:03.000Z | """Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 110
PATCH_VERSION = "4"
__short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}"
__version__ = f"{__short_version__}.{PATCH_VERSION}"
REQUIRED_PYTHON_VER = (3, 7, 0)
# Truthy date string triggers showing related deprecation warning messages.
REQUIRED_NEXT_PYTHON_VER = (3, 8, 0)
REQUIRED_NEXT_PYTHON_DATE = ""
# Format for platform files
PLATFORM_FORMAT = "{platform}.{domain}"
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = "*"
# Entity target all constant
ENTITY_MATCH_NONE = "none"
ENTITY_MATCH_ALL = "all"
# If no name is specified
DEVICE_DEFAULT_NAME = "Unnamed Device"
# Sun events
SUN_EVENT_SUNSET = "sunset"
SUN_EVENT_SUNRISE = "sunrise"
# #### CONFIG ####
CONF_ABOVE = "above"
CONF_ACCESS_TOKEN = "access_token"
CONF_ADDRESS = "address"
CONF_AFTER = "after"
CONF_ALIAS = "alias"
CONF_API_KEY = "api_key"
CONF_API_VERSION = "api_version"
CONF_ARMING_TIME = "arming_time"
CONF_AT = "at"
CONF_AUTH_MFA_MODULES = "auth_mfa_modules"
CONF_AUTH_PROVIDERS = "auth_providers"
CONF_AUTHENTICATION = "authentication"
CONF_BASE = "base"
CONF_BEFORE = "before"
CONF_BELOW = "below"
CONF_BINARY_SENSORS = "binary_sensors"
CONF_BLACKLIST = "blacklist"
CONF_BRIGHTNESS = "brightness"
CONF_BROADCAST_ADDRESS = "broadcast_address"
CONF_CLIENT_ID = "client_id"
CONF_CLIENT_SECRET = "client_secret"
CONF_CODE = "code"
CONF_COLOR_TEMP = "color_temp"
CONF_COMMAND = "command"
CONF_COMMAND_CLOSE = "command_close"
CONF_COMMAND_OFF = "command_off"
CONF_COMMAND_ON = "command_on"
CONF_COMMAND_OPEN = "command_open"
CONF_COMMAND_STATE = "command_state"
CONF_COMMAND_STOP = "command_stop"
CONF_CONDITION = "condition"
CONF_CONTINUE_ON_TIMEOUT = "continue_on_timeout"
CONF_COVERS = "covers"
CONF_CURRENCY = "currency"
CONF_CUSTOMIZE = "customize"
CONF_CUSTOMIZE_DOMAIN = "customize_domain"
CONF_CUSTOMIZE_GLOB = "customize_glob"
CONF_DELAY = "delay"
CONF_DELAY_TIME = "delay_time"
CONF_DEVICE = "device"
CONF_DEVICE_CLASS = "device_class"
CONF_DEVICE_ID = "device_id"
CONF_DEVICES = "devices"
CONF_DISARM_AFTER_TRIGGER = "disarm_after_trigger"
CONF_DISCOVERY = "discovery"
CONF_DISKS = "disks"
CONF_DISPLAY_CURRENCY = "display_currency"
CONF_DISPLAY_OPTIONS = "display_options"
CONF_DOMAIN = "domain"
CONF_DOMAINS = "domains"
CONF_EFFECT = "effect"
CONF_ELEVATION = "elevation"
CONF_EMAIL = "email"
CONF_ENTITIES = "entities"
CONF_ENTITY_ID = "entity_id"
CONF_ENTITY_NAMESPACE = "entity_namespace"
CONF_ENTITY_PICTURE_TEMPLATE = "entity_picture_template"
CONF_EVENT = "event"
CONF_EVENT_DATA = "event_data"
CONF_EVENT_DATA_TEMPLATE = "event_data_template"
CONF_EXCLUDE = "exclude"
CONF_EXTERNAL_URL = "external_url"
CONF_FILE_PATH = "file_path"
CONF_FILENAME = "filename"
CONF_FOR = "for"
CONF_FORCE_UPDATE = "force_update"
CONF_FRIENDLY_NAME = "friendly_name"
CONF_FRIENDLY_NAME_TEMPLATE = "friendly_name_template"
CONF_HEADERS = "headers"
CONF_HOST = "host"
CONF_HOSTS = "hosts"
CONF_HS = "hs"
CONF_ICON = "icon"
CONF_ICON_TEMPLATE = "icon_template"
CONF_ID = "id"
CONF_INCLUDE = "include"
CONF_INTERNAL_URL = "internal_url"
CONF_IP_ADDRESS = "ip_address"
CONF_LATITUDE = "latitude"
CONF_LIGHTS = "lights"
CONF_LONGITUDE = "longitude"
CONF_MAC = "mac"
CONF_MAXIMUM = "maximum"
CONF_METHOD = "method"
CONF_MINIMUM = "minimum"
CONF_MODE = "mode"
CONF_MONITORED_CONDITIONS = "monitored_conditions"
CONF_MONITORED_VARIABLES = "monitored_variables"
CONF_NAME = "name"
CONF_OFFSET = "offset"
CONF_OPTIMISTIC = "optimistic"
CONF_PACKAGES = "packages"
CONF_PASSWORD = "password"
CONF_PATH = "path"
CONF_PAYLOAD = "payload"
CONF_PAYLOAD_OFF = "payload_off"
CONF_PAYLOAD_ON = "payload_on"
CONF_PENDING_TIME = "pending_time"
CONF_PIN = "pin"
CONF_PLATFORM = "platform"
CONF_PORT = "port"
CONF_PREFIX = "prefix"
CONF_PROFILE_NAME = "profile_name"
CONF_PROTOCOL = "protocol"
CONF_PROXY_SSL = "proxy_ssl"
CONF_QUOTE = "quote"
CONF_RADIUS = "radius"
CONF_RECIPIENT = "recipient"
CONF_REGION = "region"
CONF_RESOURCE = "resource"
CONF_RESOURCE_TEMPLATE = "resource_template"
CONF_RESOURCES = "resources"
CONF_RGB = "rgb"
CONF_ROOM = "room"
CONF_SCAN_INTERVAL = "scan_interval"
CONF_SCENE = "scene"
CONF_SENDER = "sender"
CONF_SENSOR_TYPE = "sensor_type"
CONF_SENSORS = "sensors"
CONF_SERVICE = "service"
CONF_SERVICE_DATA = "data"
CONF_SERVICE_TEMPLATE = "service_template"
CONF_SHOW_ON_MAP = "show_on_map"
CONF_SLAVE = "slave"
CONF_SOURCE = "source"
CONF_SSL = "ssl"
CONF_STATE = "state"
CONF_STATE_TEMPLATE = "state_template"
CONF_STRUCTURE = "structure"
CONF_SWITCHES = "switches"
CONF_TEMPERATURE_UNIT = "temperature_unit"
CONF_TIME_ZONE = "time_zone"
CONF_TIMEOUT = "timeout"
CONF_TOKEN = "token"
CONF_TRIGGER_TIME = "trigger_time"
CONF_TTL = "ttl"
CONF_TYPE = "type"
CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_UNIT_SYSTEM = "unit_system"
CONF_URL = "url"
CONF_USERNAME = "username"
CONF_VALUE_TEMPLATE = "value_template"
CONF_VERIFY_SSL = "verify_ssl"
CONF_WAIT_TEMPLATE = "wait_template"
CONF_WEBHOOK_ID = "webhook_id"
CONF_WEEKDAY = "weekday"
CONF_WHITE_VALUE = "white_value"
CONF_WHITELIST = "whitelist"
CONF_WHITELIST_EXTERNAL_DIRS = "whitelist_external_dirs"
CONF_XY = "xy"
CONF_ZONE = "zone"
# #### EVENTS ####
EVENT_AUTOMATION_TRIGGERED = "automation_triggered"
EVENT_CALL_SERVICE = "call_service"
EVENT_COMPONENT_LOADED = "component_loaded"
EVENT_CORE_CONFIG_UPDATE = "core_config_updated"
EVENT_HOMEASSISTANT_CLOSE = "homeassistant_close"
EVENT_HOMEASSISTANT_START = "homeassistant_start"
EVENT_HOMEASSISTANT_STARTED = "homeassistant_started"
EVENT_HOMEASSISTANT_STOP = "homeassistant_stop"
EVENT_HOMEASSISTANT_FINAL_WRITE = "homeassistant_final_write"
EVENT_LOGBOOK_ENTRY = "logbook_entry"
EVENT_PLATFORM_DISCOVERED = "platform_discovered"
EVENT_SCRIPT_STARTED = "script_started"
EVENT_SERVICE_REGISTERED = "service_registered"
EVENT_SERVICE_REMOVED = "service_removed"
EVENT_STATE_CHANGED = "state_changed"
EVENT_THEMES_UPDATED = "themes_updated"
EVENT_TIMER_OUT_OF_SYNC = "timer_out_of_sync"
EVENT_TIME_CHANGED = "time_changed"
# #### DEVICE CLASSES ####
DEVICE_CLASS_BATTERY = "battery"
DEVICE_CLASS_HUMIDITY = "humidity"
DEVICE_CLASS_ILLUMINANCE = "illuminance"
DEVICE_CLASS_SIGNAL_STRENGTH = "signal_strength"
DEVICE_CLASS_TEMPERATURE = "temperature"
DEVICE_CLASS_TIMESTAMP = "timestamp"
DEVICE_CLASS_PRESSURE = "pressure"
DEVICE_CLASS_POWER = "power"
# #### STATES ####
STATE_ON = "on"
STATE_OFF = "off"
STATE_HOME = "home"
STATE_NOT_HOME = "not_home"
STATE_UNKNOWN = "unknown"
STATE_OPEN = "open"
STATE_OPENING = "opening"
STATE_CLOSED = "closed"
STATE_CLOSING = "closing"
STATE_PLAYING = "playing"
STATE_PAUSED = "paused"
STATE_IDLE = "idle"
STATE_STANDBY = "standby"
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_ARMED_NIGHT = "armed_night"
STATE_ALARM_ARMED_CUSTOM_BYPASS = "armed_custom_bypass"
STATE_ALARM_PENDING = "pending"
STATE_ALARM_ARMING = "arming"
STATE_ALARM_DISARMING = "disarming"
STATE_ALARM_TRIGGERED = "triggered"
STATE_LOCKED = "locked"
STATE_UNLOCKED = "unlocked"
STATE_UNAVAILABLE = "unavailable"
STATE_OK = "ok"
STATE_PROBLEM = "problem"
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = "attribution"
# Credentials
ATTR_CREDENTIALS = "credentials"
# Contains time-related attributes
ATTR_NOW = "now"
ATTR_DATE = "date"
ATTR_TIME = "time"
ATTR_SECONDS = "seconds"
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = "domain"
ATTR_SERVICE = "service"
ATTR_SERVICE_DATA = "service_data"
# IDs
ATTR_ID = "id"
# Name
ATTR_NAME = "name"
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = "entity_id"
# Contains one string or a list of strings, each being an area id
ATTR_AREA_ID = "area_id"
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = "friendly_name"
# A picture to represent entity
ATTR_ENTITY_PICTURE = "entity_picture"
# Icon to use in the frontend
ATTR_ICON = "icon"
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_UNIT_SYSTEM_METRIC: str = "metric"
CONF_UNIT_SYSTEM_IMPERIAL: str = "imperial"
# Electrical attributes
ATTR_VOLTAGE = "voltage"
# Contains the information that is discovered
ATTR_DISCOVERED = "discovered"
# Location of the device/sensor
ATTR_LOCATION = "location"
ATTR_MODE = "mode"
ATTR_BATTERY_CHARGING = "battery_charging"
ATTR_BATTERY_LEVEL = "battery_level"
ATTR_WAKEUP = "wake_up_interval"
# For devices which support a code attribute
ATTR_CODE = "code"
ATTR_CODE_FORMAT = "code_format"
# For calling a device specific command
ATTR_COMMAND = "command"
# For devices which support an armed state
ATTR_ARMED = "device_armed"
# For devices which support a locked state
ATTR_LOCKED = "locked"
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = "device_tripped"
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = "last_tripped_time"
# For all entities, this holds whether or not it should be hidden
ATTR_HIDDEN = "hidden"
# Location of the entity
ATTR_LATITUDE = "latitude"
ATTR_LONGITUDE = "longitude"
# Accuracy of location in meters
ATTR_GPS_ACCURACY = "gps_accuracy"
# If state is assumed
ATTR_ASSUMED_STATE = "assumed_state"
ATTR_STATE = "state"
ATTR_EDITABLE = "editable"
ATTR_OPTION = "option"
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = "supported_features"
# Class of device within its domain
ATTR_DEVICE_CLASS = "device_class"
# Temperature attribute
ATTR_TEMPERATURE = "temperature"
# #### UNITS OF MEASUREMENT ####
# Power units
POWER_WATT = "W"
POWER_KILO_WATT = f"k{POWER_WATT}"
# Voltage units
VOLT = "V"
# Energy units
ENERGY_WATT_HOUR = f"{POWER_WATT}h"
ENERGY_KILO_WATT_HOUR = f"k{ENERGY_WATT_HOUR}"
# Degree units
DEGREE = "°"
# Temperature units
TEMP_CELSIUS = f"{DEGREE}C"
TEMP_FAHRENHEIT = f"{DEGREE}F"
TEMP_KELVIN = f"{DEGREE}K"
# Time units
TIME_MICROSECONDS = "μs"
TIME_MILLISECONDS = "ms"
TIME_SECONDS = "s"
TIME_MINUTES = "min"
TIME_HOURS = "h"
TIME_DAYS = "d"
TIME_WEEKS = "w"
TIME_MONTHS = "m"
TIME_YEARS = "y"
# Length units
LENGTH_CENTIMETERS: str = "cm"
LENGTH_METERS: str = "m"
LENGTH_KILOMETERS: str = "km"
LENGTH_INCHES: str = "in"
LENGTH_FEET: str = "ft"
LENGTH_YARD: str = "yd"
LENGTH_MILES: str = "mi"
# Frequency units
FREQUENCY_HERTZ = "Hz"
FREQUENCY_GIGAHERTZ = f"G{FREQUENCY_HERTZ}"
# Pressure units
PRESSURE_PA: str = "Pa"
PRESSURE_HPA: str = "hPa"
PRESSURE_BAR: str = "bar"
PRESSURE_MBAR: str = "mbar"
PRESSURE_INHG: str = "inHg"
PRESSURE_PSI: str = "psi"
# Volume units
VOLUME_LITERS: str = "L"
VOLUME_MILLILITERS: str = "mL"
VOLUME_CUBIC_METERS = f"{LENGTH_METERS}³"
VOLUME_GALLONS: str = "gal"
VOLUME_FLUID_OUNCE: str = "fl. oz."
# Area units
AREA_SQUARE_METERS = f"{LENGTH_METERS}²"
# Mass units
MASS_GRAMS: str = "g"
MASS_KILOGRAMS: str = "kg"
MASS_MILLIGRAMS = "mg"
MASS_MICROGRAMS = "µg"
MASS_OUNCES: str = "oz"
MASS_POUNDS: str = "lb"
# Conductivity units
CONDUCTIVITY: str = f"µS/{LENGTH_CENTIMETERS}"
# UV Index units
UV_INDEX: str = "UV index"
# Percentage units
UNIT_PERCENTAGE = "%"
# Irradiation units
IRRADIATION_WATTS_PER_SQUARE_METER = f"{POWER_WATT}/{AREA_SQUARE_METERS}"
# Concentration units
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER = f"{MASS_MICROGRAMS}/{VOLUME_CUBIC_METERS}"
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER = f"{MASS_MILLIGRAMS}/{VOLUME_CUBIC_METERS}"
CONCENTRATION_PARTS_PER_MILLION = "ppm"
CONCENTRATION_PARTS_PER_BILLION = "ppb"
# Speed units
SPEED_METERS_PER_SECOND = f"{LENGTH_METERS}/{TIME_SECONDS}"
SPEED_KILOMETERS_PER_HOUR = f"{LENGTH_KILOMETERS}/{TIME_HOURS}"
SPEED_MILES_PER_HOUR = "mph"
# Data units
DATA_BITS = "bit"
DATA_KILOBITS = "kbit"
DATA_MEGABITS = "Mbit"
DATA_GIGABITS = "Gbit"
DATA_BYTES = "B"
DATA_KILOBYTES = "kB"
DATA_MEGABYTES = "MB"
DATA_GIGABYTES = "GB"
DATA_TERABYTES = "TB"
DATA_PETABYTES = "PB"
DATA_EXABYTES = "EB"
DATA_ZETTABYTES = "ZB"
DATA_YOTTABYTES = "YB"
DATA_KIBIBYTES = "KiB"
DATA_MEBIBYTES = "MiB"
DATA_GIBIBYTES = "GiB"
DATA_TEBIBYTES = "TiB"
DATA_PEBIBYTES = "PiB"
DATA_EXBIBYTES = "EiB"
DATA_ZEBIBYTES = "ZiB"
DATA_YOBIBYTES = "YiB"
DATA_RATE_BITS_PER_SECOND = f"{DATA_BITS}/{TIME_SECONDS}"
DATA_RATE_KILOBITS_PER_SECOND = f"{DATA_KILOBITS}/{TIME_SECONDS}"
DATA_RATE_MEGABITS_PER_SECOND = f"{DATA_MEGABITS}/{TIME_SECONDS}"
DATA_RATE_GIGABITS_PER_SECOND = f"{DATA_GIGABITS}/{TIME_SECONDS}"
DATA_RATE_BYTES_PER_SECOND = f"{DATA_BYTES}/{TIME_SECONDS}"
DATA_RATE_KILOBYTES_PER_SECOND = f"{DATA_KILOBYTES}/{TIME_SECONDS}"
DATA_RATE_MEGABYTES_PER_SECOND = f"{DATA_MEGABYTES}/{TIME_SECONDS}"
DATA_RATE_GIGABYTES_PER_SECOND = f"{DATA_GIGABYTES}/{TIME_SECONDS}"
DATA_RATE_KIBIBYTES_PER_SECOND = f"{DATA_KIBIBYTES}/{TIME_SECONDS}"
DATA_RATE_MEBIBYTES_PER_SECOND = f"{DATA_MEBIBYTES}/{TIME_SECONDS}"
DATA_RATE_GIBIBYTES_PER_SECOND = f"{DATA_GIBIBYTES}/{TIME_SECONDS}"
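# For reference, a few of the composed constants above evaluate to:
#
#     POWER_KILO_WATT                           == "kW"
#     ENERGY_KILO_WATT_HOUR                     == "kWh"
#     VOLUME_CUBIC_METERS                       == "m³"
#     CONCENTRATION_MICROGRAMS_PER_CUBIC_METER  == "µg/m³"
#     DATA_RATE_MEBIBYTES_PER_SECOND            == "MiB/s"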
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = "stop"
SERVICE_HOMEASSISTANT_RESTART = "restart"
SERVICE_TURN_ON = "turn_on"
SERVICE_TURN_OFF = "turn_off"
SERVICE_TOGGLE = "toggle"
SERVICE_RELOAD = "reload"
SERVICE_VOLUME_UP = "volume_up"
SERVICE_VOLUME_DOWN = "volume_down"
SERVICE_VOLUME_MUTE = "volume_mute"
SERVICE_VOLUME_SET = "volume_set"
SERVICE_MEDIA_PLAY_PAUSE = "media_play_pause"
SERVICE_MEDIA_PLAY = "media_play"
SERVICE_MEDIA_PAUSE = "media_pause"
SERVICE_MEDIA_STOP = "media_stop"
SERVICE_MEDIA_NEXT_TRACK = "media_next_track"
SERVICE_MEDIA_PREVIOUS_TRACK = "media_previous_track"
SERVICE_MEDIA_SEEK = "media_seek"
SERVICE_SHUFFLE_SET = "shuffle_set"
SERVICE_ALARM_DISARM = "alarm_disarm"
SERVICE_ALARM_ARM_HOME = "alarm_arm_home"
SERVICE_ALARM_ARM_AWAY = "alarm_arm_away"
SERVICE_ALARM_ARM_NIGHT = "alarm_arm_night"
SERVICE_ALARM_ARM_CUSTOM_BYPASS = "alarm_arm_custom_bypass"
SERVICE_ALARM_TRIGGER = "alarm_trigger"
SERVICE_LOCK = "lock"
SERVICE_UNLOCK = "unlock"
SERVICE_OPEN = "open"
SERVICE_CLOSE = "close"
SERVICE_CLOSE_COVER = "close_cover"
SERVICE_CLOSE_COVER_TILT = "close_cover_tilt"
SERVICE_OPEN_COVER = "open_cover"
SERVICE_OPEN_COVER_TILT = "open_cover_tilt"
SERVICE_SET_COVER_POSITION = "set_cover_position"
SERVICE_SET_COVER_TILT_POSITION = "set_cover_tilt_position"
SERVICE_STOP_COVER = "stop_cover"
SERVICE_STOP_COVER_TILT = "stop_cover_tilt"
SERVICE_TOGGLE_COVER_TILT = "toggle_cover_tilt"
SERVICE_SELECT_OPTION = "select_option"
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = "/"
URL_API = "/api/"
URL_API_STREAM = "/api/stream"
URL_API_CONFIG = "/api/config"
URL_API_DISCOVERY_INFO = "/api/discovery_info"
URL_API_STATES = "/api/states"
URL_API_STATES_ENTITY = "/api/states/{}"
URL_API_EVENTS = "/api/events"
URL_API_EVENTS_EVENT = "/api/events/{}"
URL_API_SERVICES = "/api/services"
URL_API_SERVICES_SERVICE = "/api/services/{}/{}"
URL_API_COMPONENTS = "/api/components"
URL_API_ERROR_LOG = "/api/error_log"
URL_API_LOG_OUT = "/api/log_out"
URL_API_TEMPLATE = "/api/template"
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_TOO_MANY_REQUESTS = 429
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_BASIC_AUTHENTICATION = "basic"
HTTP_DIGEST_AUTHENTICATION = "digest"
HTTP_HEADER_X_REQUESTED_WITH = "X-Requested-With"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_MULTIPART = "multipart/x-mixed-replace; boundary={}"
CONTENT_TYPE_TEXT_PLAIN = "text/plain"
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE: str = "{} is not a recognized {} unit."
LENGTH: str = "length"
MASS: str = "mass"
PRESSURE: str = "pressure"
VOLUME: str = "volume"
TEMPERATURE: str = "temperature"
SPEED_MS: str = "speed_ms"
ILLUMINANCE: str = "illuminance"
WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
# The degree of precision for platforms
PRECISION_WHOLE = 1
PRECISION_HALVES = 0.5
PRECISION_TENTHS = 0.1
# Static list of entities that will never be exposed to
# cloud, alexa, or google_home components
CLOUD_NEVER_EXPOSED_ENTITIES = ["group.all_locks"]
| 27.479661 | 85 | 0.784247 | MAJOR_VERSION = 0
MINOR_VERSION = 110
PATCH_VERSION = "4"
__short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}"
__version__ = f"{__short_version__}.{PATCH_VERSION}"
REQUIRED_PYTHON_VER = (3, 7, 0)
REQUIRED_NEXT_PYTHON_VER = (3, 8, 0)
REQUIRED_NEXT_PYTHON_DATE = ""
PLATFORM_FORMAT = "{platform}.{domain}"
MATCH_ALL = "*"
ENTITY_MATCH_NONE = "none"
ENTITY_MATCH_ALL = "all"
DEVICE_DEFAULT_NAME = "Unnamed Device"
SUN_EVENT_SUNSET = "sunset"
SUN_EVENT_SUNRISE = "sunrise"
CONF_ABOVE = "above"
CONF_ACCESS_TOKEN = "access_token"
CONF_ADDRESS = "address"
CONF_AFTER = "after"
CONF_ALIAS = "alias"
CONF_API_KEY = "api_key"
CONF_API_VERSION = "api_version"
CONF_ARMING_TIME = "arming_time"
CONF_AT = "at"
CONF_AUTH_MFA_MODULES = "auth_mfa_modules"
CONF_AUTH_PROVIDERS = "auth_providers"
CONF_AUTHENTICATION = "authentication"
CONF_BASE = "base"
CONF_BEFORE = "before"
CONF_BELOW = "below"
CONF_BINARY_SENSORS = "binary_sensors"
CONF_BLACKLIST = "blacklist"
CONF_BRIGHTNESS = "brightness"
CONF_BROADCAST_ADDRESS = "broadcast_address"
CONF_CLIENT_ID = "client_id"
CONF_CLIENT_SECRET = "client_secret"
CONF_CODE = "code"
CONF_COLOR_TEMP = "color_temp"
CONF_COMMAND = "command"
CONF_COMMAND_CLOSE = "command_close"
CONF_COMMAND_OFF = "command_off"
CONF_COMMAND_ON = "command_on"
CONF_COMMAND_OPEN = "command_open"
CONF_COMMAND_STATE = "command_state"
CONF_COMMAND_STOP = "command_stop"
CONF_CONDITION = "condition"
CONF_CONTINUE_ON_TIMEOUT = "continue_on_timeout"
CONF_COVERS = "covers"
CONF_CURRENCY = "currency"
CONF_CUSTOMIZE = "customize"
CONF_CUSTOMIZE_DOMAIN = "customize_domain"
CONF_CUSTOMIZE_GLOB = "customize_glob"
CONF_DELAY = "delay"
CONF_DELAY_TIME = "delay_time"
CONF_DEVICE = "device"
CONF_DEVICE_CLASS = "device_class"
CONF_DEVICE_ID = "device_id"
CONF_DEVICES = "devices"
CONF_DISARM_AFTER_TRIGGER = "disarm_after_trigger"
CONF_DISCOVERY = "discovery"
CONF_DISKS = "disks"
CONF_DISPLAY_CURRENCY = "display_currency"
CONF_DISPLAY_OPTIONS = "display_options"
CONF_DOMAIN = "domain"
CONF_DOMAINS = "domains"
CONF_EFFECT = "effect"
CONF_ELEVATION = "elevation"
CONF_EMAIL = "email"
CONF_ENTITIES = "entities"
CONF_ENTITY_ID = "entity_id"
CONF_ENTITY_NAMESPACE = "entity_namespace"
CONF_ENTITY_PICTURE_TEMPLATE = "entity_picture_template"
CONF_EVENT = "event"
CONF_EVENT_DATA = "event_data"
CONF_EVENT_DATA_TEMPLATE = "event_data_template"
CONF_EXCLUDE = "exclude"
CONF_EXTERNAL_URL = "external_url"
CONF_FILE_PATH = "file_path"
CONF_FILENAME = "filename"
CONF_FOR = "for"
CONF_FORCE_UPDATE = "force_update"
CONF_FRIENDLY_NAME = "friendly_name"
CONF_FRIENDLY_NAME_TEMPLATE = "friendly_name_template"
CONF_HEADERS = "headers"
CONF_HOST = "host"
CONF_HOSTS = "hosts"
CONF_HS = "hs"
CONF_ICON = "icon"
CONF_ICON_TEMPLATE = "icon_template"
CONF_ID = "id"
CONF_INCLUDE = "include"
CONF_INTERNAL_URL = "internal_url"
CONF_IP_ADDRESS = "ip_address"
CONF_LATITUDE = "latitude"
CONF_LIGHTS = "lights"
CONF_LONGITUDE = "longitude"
CONF_MAC = "mac"
CONF_MAXIMUM = "maximum"
CONF_METHOD = "method"
CONF_MINIMUM = "minimum"
CONF_MODE = "mode"
CONF_MONITORED_CONDITIONS = "monitored_conditions"
CONF_MONITORED_VARIABLES = "monitored_variables"
CONF_NAME = "name"
CONF_OFFSET = "offset"
CONF_OPTIMISTIC = "optimistic"
CONF_PACKAGES = "packages"
CONF_PASSWORD = "password"
CONF_PATH = "path"
CONF_PAYLOAD = "payload"
CONF_PAYLOAD_OFF = "payload_off"
CONF_PAYLOAD_ON = "payload_on"
CONF_PENDING_TIME = "pending_time"
CONF_PIN = "pin"
CONF_PLATFORM = "platform"
CONF_PORT = "port"
CONF_PREFIX = "prefix"
CONF_PROFILE_NAME = "profile_name"
CONF_PROTOCOL = "protocol"
CONF_PROXY_SSL = "proxy_ssl"
CONF_QUOTE = "quote"
CONF_RADIUS = "radius"
CONF_RECIPIENT = "recipient"
CONF_REGION = "region"
CONF_RESOURCE = "resource"
CONF_RESOURCE_TEMPLATE = "resource_template"
CONF_RESOURCES = "resources"
CONF_RGB = "rgb"
CONF_ROOM = "room"
CONF_SCAN_INTERVAL = "scan_interval"
CONF_SCENE = "scene"
CONF_SENDER = "sender"
CONF_SENSOR_TYPE = "sensor_type"
CONF_SENSORS = "sensors"
CONF_SERVICE = "service"
CONF_SERVICE_DATA = "data"
CONF_SERVICE_TEMPLATE = "service_template"
CONF_SHOW_ON_MAP = "show_on_map"
CONF_SLAVE = "slave"
CONF_SOURCE = "source"
CONF_SSL = "ssl"
CONF_STATE = "state"
CONF_STATE_TEMPLATE = "state_template"
CONF_STRUCTURE = "structure"
CONF_SWITCHES = "switches"
CONF_TEMPERATURE_UNIT = "temperature_unit"
CONF_TIME_ZONE = "time_zone"
CONF_TIMEOUT = "timeout"
CONF_TOKEN = "token"
CONF_TRIGGER_TIME = "trigger_time"
CONF_TTL = "ttl"
CONF_TYPE = "type"
CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_UNIT_SYSTEM = "unit_system"
CONF_URL = "url"
CONF_USERNAME = "username"
CONF_VALUE_TEMPLATE = "value_template"
CONF_VERIFY_SSL = "verify_ssl"
CONF_WAIT_TEMPLATE = "wait_template"
CONF_WEBHOOK_ID = "webhook_id"
CONF_WEEKDAY = "weekday"
CONF_WHITE_VALUE = "white_value"
CONF_WHITELIST = "whitelist"
CONF_WHITELIST_EXTERNAL_DIRS = "whitelist_external_dirs"
CONF_XY = "xy"
CONF_ZONE = "zone"
EVENT_AUTOMATION_TRIGGERED = "automation_triggered"
EVENT_CALL_SERVICE = "call_service"
EVENT_COMPONENT_LOADED = "component_loaded"
EVENT_CORE_CONFIG_UPDATE = "core_config_updated"
EVENT_HOMEASSISTANT_CLOSE = "homeassistant_close"
EVENT_HOMEASSISTANT_START = "homeassistant_start"
EVENT_HOMEASSISTANT_STARTED = "homeassistant_started"
EVENT_HOMEASSISTANT_STOP = "homeassistant_stop"
EVENT_HOMEASSISTANT_FINAL_WRITE = "homeassistant_final_write"
EVENT_LOGBOOK_ENTRY = "logbook_entry"
EVENT_PLATFORM_DISCOVERED = "platform_discovered"
EVENT_SCRIPT_STARTED = "script_started"
EVENT_SERVICE_REGISTERED = "service_registered"
EVENT_SERVICE_REMOVED = "service_removed"
EVENT_STATE_CHANGED = "state_changed"
EVENT_THEMES_UPDATED = "themes_updated"
EVENT_TIMER_OUT_OF_SYNC = "timer_out_of_sync"
EVENT_TIME_CHANGED = "time_changed"
DEVICE_CLASS_BATTERY = "battery"
DEVICE_CLASS_HUMIDITY = "humidity"
DEVICE_CLASS_ILLUMINANCE = "illuminance"
DEVICE_CLASS_SIGNAL_STRENGTH = "signal_strength"
DEVICE_CLASS_TEMPERATURE = "temperature"
DEVICE_CLASS_TIMESTAMP = "timestamp"
DEVICE_CLASS_PRESSURE = "pressure"
DEVICE_CLASS_POWER = "power"
STATE_ON = "on"
STATE_OFF = "off"
STATE_HOME = "home"
STATE_NOT_HOME = "not_home"
STATE_UNKNOWN = "unknown"
STATE_OPEN = "open"
STATE_OPENING = "opening"
STATE_CLOSED = "closed"
STATE_CLOSING = "closing"
STATE_PLAYING = "playing"
STATE_PAUSED = "paused"
STATE_IDLE = "idle"
STATE_STANDBY = "standby"
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_ARMED_NIGHT = "armed_night"
STATE_ALARM_ARMED_CUSTOM_BYPASS = "armed_custom_bypass"
STATE_ALARM_PENDING = "pending"
STATE_ALARM_ARMING = "arming"
STATE_ALARM_DISARMING = "disarming"
STATE_ALARM_TRIGGERED = "triggered"
STATE_LOCKED = "locked"
STATE_UNLOCKED = "unlocked"
STATE_UNAVAILABLE = "unavailable"
STATE_OK = "ok"
STATE_PROBLEM = "problem"
ATTR_ATTRIBUTION = "attribution"
ATTR_CREDENTIALS = "credentials"
ATTR_NOW = "now"
ATTR_DATE = "date"
ATTR_TIME = "time"
ATTR_SECONDS = "seconds"
ATTR_DOMAIN = "domain"
ATTR_SERVICE = "service"
ATTR_SERVICE_DATA = "service_data"
ATTR_ID = "id"
ATTR_NAME = "name"
ATTR_ENTITY_ID = "entity_id"
ATTR_AREA_ID = "area_id"
ATTR_FRIENDLY_NAME = "friendly_name"
ATTR_ENTITY_PICTURE = "entity_picture"
ATTR_ICON = "icon"
ATTR_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_UNIT_SYSTEM_METRIC: str = "metric"
CONF_UNIT_SYSTEM_IMPERIAL: str = "imperial"
ATTR_VOLTAGE = "voltage"
ATTR_DISCOVERED = "discovered"
ATTR_LOCATION = "location"
ATTR_MODE = "mode"
ATTR_BATTERY_CHARGING = "battery_charging"
ATTR_BATTERY_LEVEL = "battery_level"
ATTR_WAKEUP = "wake_up_interval"
ATTR_CODE = "code"
ATTR_CODE_FORMAT = "code_format"
ATTR_COMMAND = "command"
ATTR_ARMED = "device_armed"
ATTR_LOCKED = "locked"
ATTR_TRIPPED = "device_tripped"
ATTR_LAST_TRIP_TIME = "last_tripped_time"
ATTR_HIDDEN = "hidden"
# Location of the entity
ATTR_LATITUDE = "latitude"
ATTR_LONGITUDE = "longitude"
# Accuracy of location in meters
ATTR_GPS_ACCURACY = "gps_accuracy"
# If state is assumed
ATTR_ASSUMED_STATE = "assumed_state"
ATTR_STATE = "state"
ATTR_EDITABLE = "editable"
ATTR_OPTION = "option"
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = "supported_features"
# Class of device within its domain
ATTR_DEVICE_CLASS = "device_class"
# Temperature attribute
ATTR_TEMPERATURE = "temperature"
# #### UNITS OF MEASUREMENT ####
# Power units
POWER_WATT = "W"
POWER_KILO_WATT = f"k{POWER_WATT}"
# Voltage units
VOLT = "V"
# Energy units
ENERGY_WATT_HOUR = f"{POWER_WATT}h"
ENERGY_KILO_WATT_HOUR = f"k{ENERGY_WATT_HOUR}"
# Degree units
DEGREE = "°"
# Temperature units
TEMP_CELSIUS = f"{DEGREE}C"
TEMP_FAHRENHEIT = f"{DEGREE}F"
TEMP_KELVIN = f"{DEGREE}K"
# Time units
TIME_MICROSECONDS = "μs"
TIME_MILLISECONDS = "ms"
TIME_SECONDS = "s"
TIME_MINUTES = "min"
TIME_HOURS = "h"
TIME_DAYS = "d"
TIME_WEEKS = "w"
TIME_MONTHS = "m"
TIME_YEARS = "y"
# Length units
LENGTH_CENTIMETERS: str = "cm"
LENGTH_METERS: str = "m"
LENGTH_KILOMETERS: str = "km"
LENGTH_INCHES: str = "in"
LENGTH_FEET: str = "ft"
LENGTH_YARD: str = "yd"
LENGTH_MILES: str = "mi"
# Frequency units
FREQUENCY_HERTZ = "Hz"
FREQUENCY_GIGAHERTZ = f"G{FREQUENCY_HERTZ}"
# Pressure units
PRESSURE_PA: str = "Pa"
PRESSURE_HPA: str = "hPa"
PRESSURE_BAR: str = "bar"
PRESSURE_MBAR: str = "mbar"
PRESSURE_INHG: str = "inHg"
PRESSURE_PSI: str = "psi"
# Volume units
VOLUME_LITERS: str = "L"
VOLUME_MILLILITERS: str = "mL"
VOLUME_CUBIC_METERS = f"{LENGTH_METERS}³"
VOLUME_GALLONS: str = "gal"
VOLUME_FLUID_OUNCE: str = "fl. oz."
# Area units
AREA_SQUARE_METERS = f"{LENGTH_METERS}²"
# Mass units
MASS_GRAMS: str = "g"
MASS_KILOGRAMS: str = "kg"
MASS_MILLIGRAMS = "mg"
MASS_MICROGRAMS = "µg"
MASS_OUNCES: str = "oz"
MASS_POUNDS: str = "lb"
# Conductivity units
CONDUCTIVITY: str = f"µS/{LENGTH_CENTIMETERS}"
# UV Index units
UV_INDEX: str = "UV index"
# Percentage units
UNIT_PERCENTAGE = "%"
# Irradiation units
IRRADIATION_WATTS_PER_SQUARE_METER = f"{POWER_WATT}/{AREA_SQUARE_METERS}"
# Concentration units
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER = f"{MASS_MICROGRAMS}/{VOLUME_CUBIC_METERS}"
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER = f"{MASS_MILLIGRAMS}/{VOLUME_CUBIC_METERS}"
CONCENTRATION_PARTS_PER_MILLION = "ppm"
CONCENTRATION_PARTS_PER_BILLION = "ppb"
# Speed units
SPEED_METERS_PER_SECOND = f"{LENGTH_METERS}/{TIME_SECONDS}"
SPEED_KILOMETERS_PER_HOUR = f"{LENGTH_KILOMETERS}/{TIME_HOURS}"
SPEED_MILES_PER_HOUR = "mph"
# Data units
DATA_BITS = "bit"
DATA_KILOBITS = "kbit"
DATA_MEGABITS = "Mbit"
DATA_GIGABITS = "Gbit"
DATA_BYTES = "B"
DATA_KILOBYTES = "kB"
DATA_MEGABYTES = "MB"
DATA_GIGABYTES = "GB"
DATA_TERABYTES = "TB"
DATA_PETABYTES = "PB"
DATA_EXABYTES = "EB"
DATA_ZETTABYTES = "ZB"
DATA_YOTTABYTES = "YB"
DATA_KIBIBYTES = "KiB"
DATA_MEBIBYTES = "MiB"
DATA_GIBIBYTES = "GiB"
DATA_TEBIBYTES = "TiB"
DATA_PEBIBYTES = "PiB"
DATA_EXBIBYTES = "EiB"
DATA_ZEBIBYTES = "ZiB"
DATA_YOBIBYTES = "YiB"
DATA_RATE_BITS_PER_SECOND = f"{DATA_BITS}/{TIME_SECONDS}"
DATA_RATE_KILOBITS_PER_SECOND = f"{DATA_KILOBITS}/{TIME_SECONDS}"
DATA_RATE_MEGABITS_PER_SECOND = f"{DATA_MEGABITS}/{TIME_SECONDS}"
DATA_RATE_GIGABITS_PER_SECOND = f"{DATA_GIGABITS}/{TIME_SECONDS}"
DATA_RATE_BYTES_PER_SECOND = f"{DATA_BYTES}/{TIME_SECONDS}"
DATA_RATE_KILOBYTES_PER_SECOND = f"{DATA_KILOBYTES}/{TIME_SECONDS}"
DATA_RATE_MEGABYTES_PER_SECOND = f"{DATA_MEGABYTES}/{TIME_SECONDS}"
DATA_RATE_GIGABYTES_PER_SECOND = f"{DATA_GIGABYTES}/{TIME_SECONDS}"
DATA_RATE_KIBIBYTES_PER_SECOND = f"{DATA_KIBIBYTES}/{TIME_SECONDS}"
DATA_RATE_MEBIBYTES_PER_SECOND = f"{DATA_MEBIBYTES}/{TIME_SECONDS}"
DATA_RATE_GIBIBYTES_PER_SECOND = f"{DATA_GIBIBYTES}/{TIME_SECONDS}"
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = "stop"
SERVICE_HOMEASSISTANT_RESTART = "restart"
SERVICE_TURN_ON = "turn_on"
SERVICE_TURN_OFF = "turn_off"
SERVICE_TOGGLE = "toggle"
SERVICE_RELOAD = "reload"
SERVICE_VOLUME_UP = "volume_up"
SERVICE_VOLUME_DOWN = "volume_down"
SERVICE_VOLUME_MUTE = "volume_mute"
SERVICE_VOLUME_SET = "volume_set"
SERVICE_MEDIA_PLAY_PAUSE = "media_play_pause"
SERVICE_MEDIA_PLAY = "media_play"
SERVICE_MEDIA_PAUSE = "media_pause"
SERVICE_MEDIA_STOP = "media_stop"
SERVICE_MEDIA_NEXT_TRACK = "media_next_track"
SERVICE_MEDIA_PREVIOUS_TRACK = "media_previous_track"
SERVICE_MEDIA_SEEK = "media_seek"
SERVICE_SHUFFLE_SET = "shuffle_set"
SERVICE_ALARM_DISARM = "alarm_disarm"
SERVICE_ALARM_ARM_HOME = "alarm_arm_home"
SERVICE_ALARM_ARM_AWAY = "alarm_arm_away"
SERVICE_ALARM_ARM_NIGHT = "alarm_arm_night"
SERVICE_ALARM_ARM_CUSTOM_BYPASS = "alarm_arm_custom_bypass"
SERVICE_ALARM_TRIGGER = "alarm_trigger"
SERVICE_LOCK = "lock"
SERVICE_UNLOCK = "unlock"
SERVICE_OPEN = "open"
SERVICE_CLOSE = "close"
SERVICE_CLOSE_COVER = "close_cover"
SERVICE_CLOSE_COVER_TILT = "close_cover_tilt"
SERVICE_OPEN_COVER = "open_cover"
SERVICE_OPEN_COVER_TILT = "open_cover_tilt"
SERVICE_SET_COVER_POSITION = "set_cover_position"
SERVICE_SET_COVER_TILT_POSITION = "set_cover_tilt_position"
SERVICE_STOP_COVER = "stop_cover"
SERVICE_STOP_COVER_TILT = "stop_cover_tilt"
SERVICE_TOGGLE_COVER_TILT = "toggle_cover_tilt"
SERVICE_SELECT_OPTION = "select_option"
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = "/"
URL_API = "/api/"
URL_API_STREAM = "/api/stream"
URL_API_CONFIG = "/api/config"
URL_API_DISCOVERY_INFO = "/api/discovery_info"
URL_API_STATES = "/api/states"
URL_API_STATES_ENTITY = "/api/states/{}"
URL_API_EVENTS = "/api/events"
URL_API_EVENTS_EVENT = "/api/events/{}"
URL_API_SERVICES = "/api/services"
URL_API_SERVICES_SERVICE = "/api/services/{}/{}"
URL_API_COMPONENTS = "/api/components"
URL_API_ERROR_LOG = "/api/error_log"
URL_API_LOG_OUT = "/api/log_out"
URL_API_TEMPLATE = "/api/template"
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_TOO_MANY_REQUESTS = 429
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_BASIC_AUTHENTICATION = "basic"
HTTP_DIGEST_AUTHENTICATION = "digest"
HTTP_HEADER_X_REQUESTED_WITH = "X-Requested-With"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_MULTIPART = "multipart/x-mixed-replace; boundary={}"
CONTENT_TYPE_TEXT_PLAIN = "text/plain"
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE: str = "{} is not a recognized {} unit."
LENGTH: str = "length"
MASS: str = "mass"
PRESSURE: str = "pressure"
VOLUME: str = "volume"
TEMPERATURE: str = "temperature"
SPEED_MS: str = "speed_ms"
ILLUMINANCE: str = "illuminance"
WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
# The degree of precision for platforms
PRECISION_WHOLE = 1
PRECISION_HALVES = 0.5
PRECISION_TENTHS = 0.1
# Static list of entities that will never be exposed to
# cloud, alexa, or google_home components
CLOUD_NEVER_EXPOSED_ENTITIES = ["group.all_locks"]
| true | true |
1c49e547ddbba557b3ea3778bdb44456db6149f8 | 10716 | py | Python | astromodels/sources/extended_source.py | abtinshahidi/astromodels | 580e972ccc69f4fad57e22030923ee27f9d59ee3 | [
"BSD-3-Clause"
] | 1 | 2019-07-05T18:36:59.000Z | 2019-07-05T18:36:59.000Z | astromodels/sources/extended_source.py | abtinshahidi/astromodels | 580e972ccc69f4fad57e22030923ee27f9d59ee3 | [
"BSD-3-Clause"
] | null | null | null | astromodels/sources/extended_source.py | abtinshahidi/astromodels | 580e972ccc69f4fad57e22030923ee27f9d59ee3 | [
"BSD-3-Clause"
] | null | null | null | import collections
import astropy.units as u
import numpy as np
from astromodels.core.spectral_component import SpectralComponent
from astromodels.core.tree import Node
from astromodels.core.units import get_units
from astromodels.functions.functions import Constant
from astromodels.sources.source import Source, EXTENDED_SOURCE
from astromodels.utils.pretty_list import dict_to_list
class ExtendedSource(Source, Node):
def __init__(self, source_name, spatial_shape, spectral_shape=None, components=None):
# Check that we have all the required information
# and set the units
current_u = get_units()
if spatial_shape.n_dim == 2:
# Now gather the component(s)
# We need either a single component, or a list of components, but not both
# (that's the ^ symbol)
assert (spectral_shape is not None) ^ (components is not None), "You have to provide either a single " \
"component, or a list of components " \
"(but not both)."
# If the user specified only one component, make a list of one element with a default name ("main")
if spectral_shape is not None:
components = [SpectralComponent("main", spectral_shape)]
# Components in this case have energy as x and differential flux as y
diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)
# Now set the units of the components
for component in components:
component.shape.set_units(current_u.energy, diff_flux_units)
# Set the units of the brightness
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.angle**(-2))
elif spatial_shape.n_dim == 3:
# If there is no spectral component then assume that the input is a template, which will provide the
# spectrum by itself. We just use a renormalization (a bias)
if spectral_shape is None and components is None:
# This is a template. Add a component which is just a renormalization
spectral_shape = Constant()
components = [SpectralComponent("main", spectral_shape)]
# set the units
diff_flux_units = (current_u.energy * current_u.area * current_u.time *
current_u.angle**2) ** (-1)
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, diff_flux_units)
else:
# the spectral shape has been given, so this is a case where the spatial template gives an
# energy-dependent shape and the spectral components give the spectrum
assert (spectral_shape is not None) ^ (components is not None), "You can provide either a single " \
"component, or a list of components " \
"(but not both)."
if spectral_shape is not None:
components = [SpectralComponent("main", spectral_shape)]
# Assign units
diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)
# Now set the units of the components
for component in components:
component.shape.set_units(current_u.energy, diff_flux_units)
# Set the unit of the spatial template
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, current_u.angle**(-2))
else:
raise RuntimeError("The spatial shape must have either 2 or 3 dimensions.")
# Here we have a list of components
Source.__init__(self, components, EXTENDED_SOURCE)
# A source is also a Node in the tree
Node.__init__(self, source_name)
# Add the spatial shape as a child node, with an explicit name
self._spatial_shape = spatial_shape
self._add_child(self._spatial_shape)
# Add the same node also with the name of the function
#self._add_child(self._shape, self._shape.__name__)
# Add a node called 'spectrum'
spectrum_node = Node('spectrum')
spectrum_node._add_children(self._components.values())
self._add_child(spectrum_node)
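    # Illustrative construction (the shape classes are assumptions taken from
    # astromodels.functions; they are not imported in this module):
    #
    #     spatial = Gaussian_on_sphere(lon0=10.0, lat0=-30.0, sigma=0.5)
    #     spectrum = Powerlaw()
    #     src = ExtendedSource("ext_src", spatial_shape=spatial,
    #                          spectral_shape=spectrum)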
@property
def spatial_shape(self):
"""
A generic name for the spatial shape.
:return: the spatial shape instance
"""
return self._spatial_shape
def get_spatially_integrated_flux( self, energies):
"""
        Returns the spatially integrated differential flux of the source
        at the given energies
        :param energies: energies (array or float)
        :return: spatially integrated differential flux at the given energies
"""
if not isinstance(energies, np.ndarray):
energies = np.array(energies, ndmin=1)
# Get the differential flux from the spectral components
results = [self.spatial_shape.get_total_spatial_integral(energies) * component.shape(energies) for component in self.components.values()]
if isinstance(energies, u.Quantity):
# Slow version with units
# We need to sum like this (slower) because using np.sum will not preserve the units
# (thanks astropy.units)
differential_flux = sum(results)
else:
# Fast version without units, where x is supposed to be in the same units as currently defined in
# units.get_units()
differential_flux = np.sum(results, 0)
return differential_flux
def __call__(self, lon, lat, energies):
"""
Returns brightness of source at the given position and energy
:param lon: longitude (array or float)
:param lat: latitude (array or float)
:param energies: energies (array or float)
:return: differential flux at given position and energy
"""
assert type(lat) == type(lon) and type(lon) == type(energies), "Type mismatch in input of call"
if not isinstance(lat, np.ndarray):
lat = np.array(lat, ndmin=1)
lon = np.array(lon, ndmin=1)
energies = np.array(energies, ndmin=1)
# Get the differential flux from the spectral components
results = [component.shape(energies) for component in self.components.values()]
if isinstance(energies, u.Quantity):
# Slow version with units
# We need to sum like this (slower) because using np.sum will not preserve the units
# (thanks astropy.units)
differential_flux = sum(results)
else:
# Fast version without units, where x is supposed to be in the same units as currently defined in
# units.get_units()
differential_flux = np.sum(results, 0)
# Get brightness from spatial model
if self._spatial_shape.n_dim == 2:
brightness = self._spatial_shape(lon, lat)
# In this case the spectrum is the same everywhere
n_points = lat.shape[0]
n_energies = differential_flux.shape[0]
# The following is a little obscure, but it is 6x faster than doing a for loop
cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T
result = (cube.T * brightness).T
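            # ``result`` now has shape (n_points, n_energies): one copy of the
            # spectrum per sky position, scaled by the local brightness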
else:
result = self._spatial_shape(lon, lat, energies) * differential_flux
# Do not clip the output, otherwise it will not be possible to use ext. sources
# with negative fluxes
return np.squeeze(result)
def has_free_parameters(self):
"""
Returns True or False whether there is any parameter in this source
:return:
"""
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
return True
for par in self.spatial_shape.parameters.values():
if par.free:
return True
return False
@property
def free_parameters(self):
"""
Returns a dictionary of free parameters for this source
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
free_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
free_parameters[par.path] = par
for par in self.spatial_shape.parameters.values():
if par.free:
free_parameters[par.path] = par
return free_parameters
@property
def parameters(self):
"""
Returns a dictionary of all parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
all_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
all_parameters[par.path] = par
for par in self.spatial_shape.parameters.values():
all_parameters[par.path] = par
return all_parameters
def _repr__base(self, rich_output=False):
"""
Representation of the object
:param rich_output: if True, generates HTML, otherwise text
:return: the representation
"""
# Make a dictionary which will then be transformed in a list
repr_dict = collections.OrderedDict()
key = '%s (extended source)' % self.name
repr_dict[key] = collections.OrderedDict()
repr_dict[key]['shape'] = self._spatial_shape.to_dict(minimal=True)
repr_dict[key]['spectrum'] = collections.OrderedDict()
        for component_name, component in self.components.items():
repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)
return dict_to_list(repr_dict, rich_output)
def get_boundaries(self):
"""
Returns the boundaries for this extended source
:return: a tuple of tuples ((min. lon, max. lon), (min lat, max lat))
"""
return self._spatial_shape.get_boundaries()
| 32.871166 | 145 | 0.60713 | import collections
import astropy.units as u
import numpy as np
from astromodels.core.spectral_component import SpectralComponent
from astromodels.core.tree import Node
from astromodels.core.units import get_units
from astromodels.functions.functions import Constant
from astromodels.sources.source import Source, EXTENDED_SOURCE
from astromodels.utils.pretty_list import dict_to_list
class ExtendedSource(Source, Node):
def __init__(self, source_name, spatial_shape, spectral_shape=None, components=None):
current_u = get_units()
if spatial_shape.n_dim == 2:
assert (spectral_shape is not None) ^ (components is not None), "You have to provide either a single " \
"component, or a list of components " \
"(but not both)."
# If the user specified only one component, make a list of one element with a default name ("main")
if spectral_shape is not None:
components = [SpectralComponent("main", spectral_shape)]
# Components in this case have energy as x and differential flux as y
diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)
# Now set the units of the components
for component in components:
component.shape.set_units(current_u.energy, diff_flux_units)
# Set the units of the brightness
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.angle**(-2))
elif spatial_shape.n_dim == 3:
# If there is no spectral component then assume that the input is a template, which will provide the
# spectrum by itself. We just use a renormalization (a bias)
if spectral_shape is None and components is None:
# This is a template. Add a component which is just a renormalization
spectral_shape = Constant()
components = [SpectralComponent("main", spectral_shape)]
# set the units
diff_flux_units = (current_u.energy * current_u.area * current_u.time *
current_u.angle**2) ** (-1)
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, diff_flux_units)
else:
# the spectral shape has been given, so this is a case where the spatial template gives an
# energy-dependent shape and the spectral components give the spectrum
assert (spectral_shape is not None) ^ (components is not None), "You can provide either a single " \
"component, or a list of components " \
"(but not both)."
if spectral_shape is not None:
components = [SpectralComponent("main", spectral_shape)]
# Assign units
diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)
# Now set the units of the components
for component in components:
component.shape.set_units(current_u.energy, diff_flux_units)
# Set the unit of the spatial template
spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, current_u.angle**(-2))
else:
raise RuntimeError("The spatial shape must have either 2 or 3 dimensions.")
# Here we have a list of components
Source.__init__(self, components, EXTENDED_SOURCE)
# A source is also a Node in the tree
Node.__init__(self, source_name)
# Add the spatial shape as a child node, with an explicit name
self._spatial_shape = spatial_shape
self._add_child(self._spatial_shape)
# Add the same node also with the name of the function
#self._add_child(self._shape, self._shape.__name__)
# Add a node called 'spectrum'
spectrum_node = Node('spectrum')
spectrum_node._add_children(self._components.values())
self._add_child(spectrum_node)
@property
def spatial_shape(self):
return self._spatial_shape
def get_spatially_integrated_flux( self, energies):
if not isinstance(energies, np.ndarray):
energies = np.array(energies, ndmin=1)
# Get the differential flux from the spectral components
results = [self.spatial_shape.get_total_spatial_integral(energies) * component.shape(energies) for component in self.components.values()]
if isinstance(energies, u.Quantity):
# Slow version with units
# We need to sum like this (slower) because using np.sum will not preserve the units
# (thanks astropy.units)
differential_flux = sum(results)
else:
# Fast version without units, where x is supposed to be in the same units as currently defined in
# units.get_units()
differential_flux = np.sum(results, 0)
return differential_flux
def __call__(self, lon, lat, energies):
assert type(lat) == type(lon) and type(lon) == type(energies), "Type mismatch in input of call"
if not isinstance(lat, np.ndarray):
lat = np.array(lat, ndmin=1)
lon = np.array(lon, ndmin=1)
energies = np.array(energies, ndmin=1)
# Get the differential flux from the spectral components
results = [component.shape(energies) for component in self.components.values()]
if isinstance(energies, u.Quantity):
# Slow version with units
# We need to sum like this (slower) because using np.sum will not preserve the units
# (thanks astropy.units)
differential_flux = sum(results)
else:
# Fast version without units, where x is supposed to be in the same units as currently defined in
# units.get_units()
differential_flux = np.sum(results, 0)
# Get brightness from spatial model
if self._spatial_shape.n_dim == 2:
brightness = self._spatial_shape(lon, lat)
# In this case the spectrum is the same everywhere
n_points = lat.shape[0]
n_energies = differential_flux.shape[0]
# The following is a little obscure, but it is 6x faster than doing a for loop
cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T
result = (cube.T * brightness).T
else:
result = self._spatial_shape(lon, lat, energies) * differential_flux
# Do not clip the output, otherwise it will not be possible to use ext. sources
# with negative fluxes
return np.squeeze(result)
def has_free_parameters(self):
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
return True
for par in self.spatial_shape.parameters.values():
if par.free:
return True
return False
@property
def free_parameters(self):
free_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
free_parameters[par.path] = par
for par in self.spatial_shape.parameters.values():
if par.free:
free_parameters[par.path] = par
return free_parameters
@property
def parameters(self):
all_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
all_parameters[par.path] = par
for par in self.spatial_shape.parameters.values():
all_parameters[par.path] = par
return all_parameters
def _repr__base(self, rich_output=False):
# Make a dictionary which will then be transformed in a list
repr_dict = collections.OrderedDict()
key = '%s (extended source)' % self.name
repr_dict[key] = collections.OrderedDict()
repr_dict[key]['shape'] = self._spatial_shape.to_dict(minimal=True)
repr_dict[key]['spectrum'] = collections.OrderedDict()
        for component_name, component in self.components.items():
repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)
return dict_to_list(repr_dict, rich_output)
def get_boundaries(self):
return self._spatial_shape.get_boundaries()
| true | true |
1c49e550917e878195d1309e7b174aba630d18ef | 714 | py | Python | setup.py | M69k65y/endpoint-logger | 96aa2513271ad984bf015c959300f31f6c2acd52 | [
"MIT"
] | null | null | null | setup.py | M69k65y/endpoint-logger | 96aa2513271ad984bf015c959300f31f6c2acd52 | [
"MIT"
] | null | null | null | setup.py | M69k65y/endpoint-logger | 96aa2513271ad984bf015c959300f31f6c2acd52 | [
"MIT"
] | null | null | null | from setuptools import setup
def readme():
with open("readme.md") as f:
return f.read()
setup(
name = "endpoint-logger",
version = "1.0.1",
description = "A Python package used to track Flask API endpoint access. (Built in Python 3)",
url = "https://github.com/M69k65y/endpoint-logger",
author = "M69k65y",
license = "MIT",
packages = ["endpoint_logger"],
zip_safe=False,
install_requires = [
"flask"
],
classifiers = [
"Development Status :: 3 - Alpha",
"Framework :: Flask",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3"
],
keywords = "flask endpoint logger logging",
long_description = readme(),
long_description_content_type = "text/markdown"
)
1c49e68ee19ec34fdae4c2bd75976597c975284e | 1,683 | py | Python | pype/modules/deadline/plugins/publish/validate_deadline_connection.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | ["MIT"] | null | null | null | pype/modules/deadline/plugins/publish/validate_deadline_connection.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | ["MIT"] | null | null | null | pype/modules/deadline/plugins/publish/validate_deadline_connection.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | ["MIT"] | null | null | null |
import pyblish.api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
import os
class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
"""Validate Deadline Web Service is running"""
label = "Validate Deadline Web Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya", "nuke"]
families = ["renderlayer"]
def process(self, context):
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
deadline_url = (
context.data["system_settings"]
["modules"]
["deadline"]
["DEADLINE_REST_URL"]
)
# Check response
response = self._requests_get(deadline_url)
assert response.ok, "Response must be ok"
assert response.text.startswith("Deadline Web Service "), (
"Web service did not respond with 'Deadline Web Service'"
)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
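# Disable certificate checks only when PYPE_DONT_VERIFY_SSL is set to a
# non-empty value; otherwise leave normal SSL verification enabled.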
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL") else True # noqa
return requests.get(*args, **kwargs)
1c49e6c1969a6fefd977839e22d2106128182e9a | 1,684 | py | Python | waipawama/dag/taxdoo.py | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | ["MIT"] | null | null | null | waipawama/dag/taxdoo.py | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | ["MIT"] | null | null | null | waipawama/dag/taxdoo.py | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | ["MIT"] | null | null | null |
from airflow.decorators import dag, task
from airflow.operators.python import get_current_context
from airflow.operators.bash_operator import BashOperator
import datetime
from waipawama.models.taxdoo import TaxdooMeta
def get_timespan() -> str:
"""This is our main parameter in our monthly pipeline."""
context = get_current_context()
year_month = '-'.join(context['ds'].split('-')[:2]) # e.g. '2019-09'
return year_month
@dag(default_args={'owner': 'florian'},
schedule_interval='@monthly',
start_date=datetime.datetime(2018, 12, 1),
tags=['VAT'])
def taxdoo_dag():
"""Ingestion for Taxdoo."""
@task()
def taxdoo_external_file() -> str:
timespan = get_timespan() # e.g '2021-01'
meta = TaxdooMeta(timespan=timespan)
meta.DataFileExists # throws error if not
return timespan
@task()
def taxdoo_write_parquet(timespan) -> str:
meta = TaxdooMeta(timespan=timespan)
meta.save_as_parquet()
return timespan
@task()
def taxdoo_load_to_bigquery(timespan):
meta = TaxdooMeta(timespan=timespan)
if not meta.TableExists:
meta.create_table()
meta.update_table() # relaxation and add columns possible
meta.append_data()
return timespan
dbt_test = BashOperator(
task_id='dbt_test',
bash_command=('source ~/dbt-env/bin/activate && '
'cd ~/projects/accountant/ && dbt test'))
timespan = taxdoo_external_file()
timespan = taxdoo_write_parquet(timespan)
timespan = taxdoo_load_to_bigquery(timespan)
dbt_test.set_upstream(timespan)
taxdoo_etl_dag = taxdoo_dag()
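# Task flow of the DAG above (sketch): the '<YYYY-MM>' timespan string is handed
# from task to task via TaskFlow return values (XCom):
# taxdoo_external_file -> taxdoo_write_parquet -> taxdoo_load_to_bigquery -> dbt_test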
1c49e712be10de6dccb1f07fa13fd2281c8ef4d7 | 29,322 | py | Python | pytorch/pytorchcv/models/resnet.py | oliviaweng/imgclsmob | 80fffbb46f986614b162c725b21f3d208597ac77 | ["MIT"] | null | null | null | pytorch/pytorchcv/models/resnet.py | oliviaweng/imgclsmob | 80fffbb46f986614b162c725b21f3d208597ac77 | ["MIT"] | null | null | null | pytorch/pytorchcv/models/resnet.py | oliviaweng/imgclsmob | 80fffbb46f986614b162c725b21f3d208597ac77 | ["MIT"] | null | null | null |
"""
ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class ResBlock(nn.Module):
"""
Simple ResNet block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bias=False,
use_bn=True):
super(ResBlock, self).__init__()
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=bias,
use_bn=use_bn)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bias=bias,
use_bn=use_bn,
activation=None)
def forward(self, x, identity=None):
x = self.conv1(x)
if identity is not None:
# print('adding shorter skip connection')
x = x + identity # Shorter skip connection - LIV
x = self.conv2(x)
return x
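# Note on the block above: when an `identity` tensor is passed to forward(), the
# skip connection is added after the first 3x3 conv instead of around the whole
# block -- the "shorter skip connection" variant mentioned in the LIV comments.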
class ResBottleneck(nn.Module):
"""
ResNet bottleneck block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
padding=1,
dilation=1,
conv1_stride=False,
bottleneck_factor=4):
super(ResBottleneck, self).__init__()
mid_channels = out_channels // bottleneck_factor
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
stride=(stride if conv1_stride else 1))
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
stride=(1 if conv1_stride else stride),
padding=padding,
dilation=dilation)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
activation=None)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class ResUnit(nn.Module):
"""
ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer in bottleneck.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer in bottleneck.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
padding=1,
dilation=1,
bias=False,
use_bn=True,
bottleneck=True,
conv1_stride=False):
super(ResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
conv1_stride=conv1_stride)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=bias,
use_bn=use_bn)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=bias,
use_bn=use_bn,
activation=None)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x, None) # Needed for original skip connection AND need line below: x = x + identity
# x = self.body(x, identity) # creates shorter skip connection - LIV
# Don't need skip connection bc shorter skip connection now in ResBlock() - LIV
x = x + identity
x = self.activ(x)
return x
"""
LIV
"""
class NonResBlock(nn.Module):
"""
Simple ResNet-style block without the residual (skip) connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bias=False,
use_bn=True):
super(NonResBlock, self).__init__()
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=bias,
use_bn=use_bn)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bias=bias,
use_bn=use_bn,
activation=None)
def forward(self, x):
x = self.conv1(x)
# NO skip connections at all
# if identity is not None:
# # print('adding shorter skip connection)
# x = x + identity # Shorter skip connection - LIV
x = self.conv2(x)
return x
class NonResUnit(nn.Module):
"""
ResNet-style unit without the residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer in bottleneck.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer in bottleneck.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
padding=1,
dilation=1,
bias=False,
use_bn=True,
bottleneck=True,
conv1_stride=False):
super(NonResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
conv1_stride=conv1_stride)
else:
self.body = NonResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bias=bias,
use_bn=use_bn)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
# if self.resize_identity:
# identity = self.identity_conv(x)
# else:
# identity = x
x = self.body(x)
# No skip connection
# x = x + identity
x = self.activ(x)
return x
"""
LIV END
"""
class ResInitBlock(nn.Module):
"""
ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(ResInitBlock, self).__init__()
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class ResNet(nn.Module):
"""
ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(ResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), ResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_resnet(blocks,
bottleneck=None,
conv1_stride=True,
width_scale=1.0,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14 and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = ResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
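# Example (sketch, mirroring _test() at the bottom of this file):
# get_resnet(blocks=18, width_scale=0.5) builds the resnet18_wd2 variant, and
# get_resnet(blocks=50, conv1_stride=False) builds resnet50b.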
def resnet10(**kwargs):
"""
ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=10, model_name="resnet10", **kwargs)
def resnet12(**kwargs):
"""
ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=12, model_name="resnet12", **kwargs)
def resnet14(**kwargs):
"""
ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, model_name="resnet14", **kwargs)
def resnetbc14b(**kwargs):
"""
ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs)
def resnet16(**kwargs):
"""
ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=16, model_name="resnet16", **kwargs)
def resnet18_wd4(**kwargs):
"""
ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs)
def resnet18_wd2(**kwargs):
"""
ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs)
def resnet18_w3d4(**kwargs):
"""
ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs)
def resnet18(**kwargs):
"""
ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="resnet18", **kwargs)
def resnet26(**kwargs):
"""
ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs)
def resnetbc26b(**kwargs):
"""
ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs)
def resnet34(**kwargs):
"""
ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="resnet34", **kwargs)
def resnetbc38b(**kwargs):
"""
ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs)
def resnet50(**kwargs):
"""
ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="resnet50", **kwargs)
def resnet50b(**kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs)
def resnet101(**kwargs):
"""
ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="resnet101", **kwargs)
def resnet101b(**kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs)
def resnet152(**kwargs):
"""
ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="resnet152", **kwargs)
def resnet152b(**kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs)
def resnet200(**kwargs):
"""
ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, model_name="resnet200", **kwargs)
def resnet200b(**kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
resnet10,
resnet12,
resnet14,
resnetbc14b,
resnet16,
resnet18_wd4,
resnet18_wd2,
resnet18_w3d4,
resnet18,
resnet26,
resnetbc26b,
resnet34,
resnetbc38b,
resnet50,
resnet50b,
resnet101,
resnet101b,
resnet152,
resnet152b,
resnet200,
resnet200b,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10 or weight_count == 5418792)
assert (model != resnet12 or weight_count == 5492776)
assert (model != resnet14 or weight_count == 5788200)
assert (model != resnetbc14b or weight_count == 10064936)
assert (model != resnet16 or weight_count == 6968872)
assert (model != resnet18_wd4 or weight_count == 3937400)
assert (model != resnet18_wd2 or weight_count == 5804296)
assert (model != resnet18_w3d4 or weight_count == 8476056)
assert (model != resnet18 or weight_count == 11689512)
assert (model != resnet26 or weight_count == 17960232)
assert (model != resnetbc26b or weight_count == 15995176)
assert (model != resnet34 or weight_count == 21797672)
assert (model != resnetbc38b or weight_count == 21925416)
assert (model != resnet50 or weight_count == 25557032)
assert (model != resnet50b or weight_count == 25557032)
assert (model != resnet101 or weight_count == 44549160)
assert (model != resnet101b or weight_count == 44549160)
assert (model != resnet152 or weight_count == 60192808)
assert (model != resnet152b or weight_count == 60192808)
assert (model != resnet200 or weight_count == 64673832)
assert (model != resnet200b or weight_count == 64673832)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
1c49e72eda40b3456987582ab05a77a3b7b4b840 | 318 | py | Python | pajbot/tests/modules/test_two_word_variations.py | UVClay/SkookumBot | 69679c78475662e2b7948fe63d529a755c47fc80 | ["MIT"] | 1 | 2021-10-02T10:19:38.000Z | 2021-10-02T10:19:38.000Z | pajbot/tests/modules/test_two_word_variations.py | UVClay/SkookumBot | 69679c78475662e2b7948fe63d529a755c47fc80 | ["MIT"] | 64 | 2021-01-09T21:28:05.000Z | 2022-03-31T10:07:05.000Z | pajbot/tests/modules/test_two_word_variations.py | UVClay/SkookumBot | 69679c78475662e2b7948fe63d529a755c47fc80 | ["MIT"] | 1 | 2020-03-11T19:37:10.000Z | 2020-03-11T19:37:10.000Z |
from pajbot.modules.bingo import two_word_variations
def test_two_word_variations():
assert two_word_variations("abc", "def", "KKona") == {
"abc-def": "KKona",
"abc_def": "KKona",
"abcdef": "KKona",
"def-abc": "KKona",
"def_abc": "KKona",
"defabc": "KKona",
}
1c49e7d519f97f97ba1df092341baac9cd9535c8 | 61,625 | py | Python | nltk/parse/chart.py | addisonblanda/Plato | cebd522dfe4b21f8c965f0e56637c15744817474 | ["MIT"] | 6 | 2017-01-22T03:15:01.000Z | 2019-12-01T16:19:36.000Z | nltk/parse/chart.py | addisonblanda/Plato | cebd522dfe4b21f8c965f0e56637c15744817474 | ["MIT"] | 3 | 2020-03-24T15:38:23.000Z | 2021-02-02T21:44:18.000Z | nltk/parse/chart.py | addisonblanda/Plato | cebd522dfe4b21f8c965f0e56637c15744817474 | ["MIT"] | 6 | 2017-01-19T21:49:55.000Z | 2021-04-14T09:57:17.000Z |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: A Chart Parser
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Jean Mark Gawron <[email protected]>
# Peter Ljunglöf <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Data classes and parser implementations for "chart parsers", which
use dynamic programming to efficiently parse a text. A chart
parser derives parse trees for a text by iteratively adding "edges"
to a "chart." Each edge represents a hypothesis about the tree
structure for a subsequence of the text. The chart is a
"blackboard" for composing and combining these hypotheses.
When a chart parser begins parsing a text, it creates a new (empty)
chart, spanning the text. It then incrementally adds new edges to the
chart. A set of "chart rules" specifies the conditions under which
new edges should be added to the chart. Once the chart reaches a
stage where none of the chart rules adds any new edges, parsing is
complete.
Charts are encoded with the ``Chart`` class, and edges are encoded with
the ``TreeEdge`` and ``LeafEdge`` classes. The chart parser module
defines two chart parsers:
- ``ChartParser`` is a simple and flexible chart parser. Given a
set of chart rules, it will apply those rules to the chart until
no more edges are added.
- ``SteppingChartParser`` is a subclass of ``ChartParser`` that can
be used to step through the parsing process.
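A minimal usage sketch (illustrative only; the toy grammar and sentence are
invented for this example)::

    from nltk.grammar import CFG
    from nltk.parse.chart import ChartParser

    grammar = CFG.fromstring('''
        S -> NP VP
        NP -> Det N
        VP -> V NP
        Det -> 'the'
        N -> 'dog' | 'cat'
        V -> 'chased'
    ''')
    parser = ChartParser(grammar)
    for tree in parser.parse(['the', 'dog', 'chased', 'the', 'cat']):
        print(tree)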
"""
from __future__ import print_function, division, unicode_literals
import itertools
import re
import warnings
from nltk import compat
from nltk.tree import Tree
from nltk.grammar import PCFG, is_nonterminal, is_terminal
from nltk.util import OrderedDict
from nltk.internals import raise_unorderable_types
from nltk.compat import (total_ordering, python_2_unicode_compatible,
unicode_repr)
from nltk.parse.api import ParserI
########################################################################
## Edges
########################################################################
@total_ordering
class EdgeI(object):
"""
A hypothesis about the structure of part of a sentence.
Each edge records the fact that a structure is (partially)
consistent with the sentence. An edge contains:
- A span, indicating what part of the sentence is
consistent with the hypothesized structure.
- A left-hand side, specifying what kind of structure is
hypothesized.
- A right-hand side, specifying the contents of the
hypothesized structure.
- A dot position, indicating how much of the hypothesized
structure is consistent with the sentence.
Every edge is either complete or incomplete:
- An edge is complete if its structure is fully consistent
with the sentence.
- An edge is incomplete if its structure is partially
consistent with the sentence. For every incomplete edge, the
span specifies a possible prefix for the edge's structure.
There are two kinds of edge:
- A ``TreeEdge`` records which trees have been found to
be (partially) consistent with the text.
- A ``LeafEdge`` records the tokens occurring in the text.
The ``EdgeI`` interface provides a common interface to both types
of edge, allowing chart parsers to treat them in a uniform manner.
"""
def __init__(self):
if self.__class__ == EdgeI:
raise TypeError('Edge is an abstract interface')
#////////////////////////////////////////////////////////////
# Span
#////////////////////////////////////////////////////////////
def span(self):
"""
Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the
portion of the sentence that is consistent with this
edge's structure.
:rtype: tuple(int, int)
"""
raise NotImplementedError()
def start(self):
"""
Return the start index of this edge's span.
:rtype: int
"""
raise NotImplementedError()
def end(self):
"""
Return the end index of this edge's span.
:rtype: int
"""
raise NotImplementedError()
def length(self):
"""
Return the length of this edge's span.
:rtype: int
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Left Hand Side
#////////////////////////////////////////////////////////////
def lhs(self):
"""
Return this edge's left-hand side, which specifies what kind
of structure is hypothesized by this edge.
:see: ``TreeEdge`` and ``LeafEdge`` for a description of
the left-hand side values for each edge type.
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Right Hand Side
#////////////////////////////////////////////////////////////
def rhs(self):
"""
Return this edge's right-hand side, which specifies
the content of the structure hypothesized by this edge.
:see: ``TreeEdge`` and ``LeafEdge`` for a description of
the right-hand side values for each edge type.
"""
raise NotImplementedError()
def dot(self):
"""
Return this edge's dot position, which indicates how much of
the hypothesized structure is consistent with the
sentence. In particular, ``self.rhs[:dot]`` is consistent
with ``tokens[self.start():self.end()]``.
:rtype: int
"""
raise NotImplementedError()
def nextsym(self):
"""
Return the element of this edge's right-hand side that
immediately follows its dot.
:rtype: Nonterminal or terminal or None
"""
raise NotImplementedError()
def is_complete(self):
"""
Return True if this edge's structure is fully consistent
with the text.
:rtype: bool
"""
raise NotImplementedError()
def is_incomplete(self):
"""
Return True if this edge's structure is partially consistent
with the text.
:rtype: bool
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Comparisons & hashing
#////////////////////////////////////////////////////////////
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self._comparison_key == other._comparison_key)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, EdgeI):
raise_unorderable_types("<", self, other)
if self.__class__ is other.__class__:
return self._comparison_key < other._comparison_key
else:
return self.__class__.__name__ < other.__class__.__name__
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(self._comparison_key)
return self._hash
@python_2_unicode_compatible
class TreeEdge(EdgeI):
"""
An edge that records the fact that a tree is (partially)
consistent with the sentence. A tree edge consists of:
- A span, indicating what part of the sentence is
consistent with the hypothesized tree.
- A left-hand side, specifying the hypothesized tree's node
value.
- A right-hand side, specifying the hypothesized tree's
children. Each element of the right-hand side is either a
terminal, specifying a token with that terminal as its leaf
value; or a nonterminal, specifying a subtree with that
nonterminal's symbol as its node value.
- A dot position, indicating which children are consistent
with part of the sentence. In particular, if ``dot`` is the
dot position, ``rhs`` is the right-hand size, ``(start,end)``
is the span, and ``sentence`` is the list of tokens in the
sentence, then ``tokens[start:end]`` can be spanned by the
children specified by ``rhs[:dot]``.
For more information about edges, see the ``EdgeI`` interface.
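An illustrative example (the grammar symbols here are invented)::

    from nltk.grammar import Nonterminal
    edge = TreeEdge(span=(0, 1), lhs=Nonterminal('NP'),
                    rhs=[Nonterminal('Det'), Nonterminal('N')], dot=1)
    edge.is_incomplete()        # True: only rhs[:1] has been matched so far
    edge.nextsym()              # Nonterminal('N'), the symbol after the dot
    edge = edge.move_dot_forward(2)
    edge.is_complete()          # True: the dot is now at the end of the rhs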
"""
def __init__(self, span, lhs, rhs, dot=0):
"""
Construct a new ``TreeEdge``.
:type span: tuple(int, int)
:param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the
portion of the sentence that is consistent with the new
edge's structure.
:type lhs: Nonterminal
:param lhs: The new edge's left-hand side, specifying the
hypothesized tree's node value.
:type rhs: list(Nonterminal and str)
:param rhs: The new edge's right-hand side, specifying the
hypothesized tree's children.
:type dot: int
:param dot: The position of the new edge's dot. This position
specifies what prefix of the production's right hand side
is consistent with the text. In particular, if
``sentence`` is the list of tokens in the sentence, then
``tokens[span[0]:span[1]]`` can be spanned by the
children specified by ``rhs[:dot]``.
"""
self._span = span
self._lhs = lhs
rhs = tuple(rhs)
self._rhs = rhs
self._dot = dot
self._comparison_key = (span, lhs, rhs, dot)
@staticmethod
def from_production(production, index):
"""
Return a new ``TreeEdge`` formed from the given production.
The new edge's left-hand side and right-hand side will
be taken from ``production``; its span will be
``(index,index)``; and its dot position will be ``0``.
:rtype: TreeEdge
"""
return TreeEdge(span=(index, index), lhs=production.lhs(),
rhs=production.rhs(), dot=0)
def move_dot_forward(self, new_end):
"""
Return a new ``TreeEdge`` formed from this edge.
The new edge's dot position is increased by ``1``,
and its end index will be replaced by ``new_end``.
:param new_end: The new end index.
:type new_end: int
:rtype: TreeEdge
"""
return TreeEdge(span=(self._span[0], new_end),
lhs=self._lhs, rhs=self._rhs,
dot=self._dot+1)
# Accessors
def lhs(self): return self._lhs
def span(self): return self._span
def start(self): return self._span[0]
def end(self): return self._span[1]
def length(self): return self._span[1] - self._span[0]
def rhs(self): return self._rhs
def dot(self): return self._dot
def is_complete(self): return self._dot == len(self._rhs)
def is_incomplete(self): return self._dot != len(self._rhs)
def nextsym(self):
if self._dot >= len(self._rhs): return None
else: return self._rhs[self._dot]
# String representation
def __str__(self):
str = '[%s:%s] ' % (self._span[0], self._span[1])
str += '%-2r ->' % (self._lhs,)
for i in range(len(self._rhs)):
if i == self._dot: str += ' *'
str += ' %s' % unicode_repr(self._rhs[i])
if len(self._rhs) == self._dot: str += ' *'
return str
def __repr__(self):
return '[Edge: %s]' % self
@python_2_unicode_compatible
class LeafEdge(EdgeI):
"""
An edge that records the fact that a leaf value is consistent with
a word in the sentence. A leaf edge consists of:
- An index, indicating the position of the word.
- A leaf, specifying the word's content.
A leaf edge's left-hand side is its leaf value, and its right hand
side is ``()``. Its span is ``[index, index+1]``, and its dot
position is ``0``.
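For example (illustrative), ``LeafEdge('dog', 1)`` spans ``(1, 2)``, has the
token ``'dog'`` as its left-hand side, and is always complete.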
"""
def __init__(self, leaf, index):
"""
Construct a new ``LeafEdge``.
:param leaf: The new edge's leaf value, specifying the word
that is recorded by this edge.
:param index: The new edge's index, specifying the position of
the word that is recorded by this edge.
"""
self._leaf = leaf
self._index = index
self._comparison_key = (leaf, index)
# Accessors
def lhs(self): return self._leaf
def span(self): return (self._index, self._index+1)
def start(self): return self._index
def end(self): return self._index+1
def length(self): return 1
def rhs(self): return ()
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
def nextsym(self): return None
# String representations
def __str__(self):
return '[%s:%s] %s' % (self._index, self._index+1, unicode_repr(self._leaf))
def __repr__(self):
return '[Edge: %s]' % (self)
########################################################################
## Chart
########################################################################
class Chart(object):
"""
A blackboard for hypotheses about the syntactic constituents of a
sentence. A chart contains a set of edges, and each edge encodes
a single hypothesis about the structure of some portion of the
sentence.
The ``select`` method can be used to select a specific collection
of edges. For example ``chart.select(is_complete=True, start=0)``
yields all complete edges whose start indices are 0. To ensure
the efficiency of these selection operations, ``Chart`` dynamically
creates and maintains an index for each set of attributes that
have been selected on.
In order to reconstruct the trees that are represented by an edge,
the chart associates each edge with a set of child pointer lists.
A child pointer list is a list of the edges that license an
edge's right-hand side.
:ivar _tokens: The sentence that the chart covers.
:ivar _num_leaves: The number of tokens.
:ivar _edges: A list of the edges in the chart
:ivar _edge_to_cpls: A dictionary mapping each edge to a set
of child pointer lists that are associated with that edge.
:ivar _indexes: A dictionary mapping tuples of edge attributes
to indices, where each index maps the corresponding edge
attribute values to lists of edges.
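A minimal doctest-style sketch of ``insert`` and ``select`` (the
three-word sentence is an arbitrary illustration; ``LeafEdge`` is
defined in this module):

    >>> c = Chart('I saw John'.split())
    >>> c.num_leaves()
    3
    >>> e = LeafEdge('saw', 1)
    >>> c.insert(e, ())
    True
    >>> list(c.select(start=1, is_complete=True)) == [e]
    True
    >>> c.insert(e, ())        # same edge and child pointer list again
    False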
"""
def __init__(self, tokens):
"""
Construct a new chart. The chart starts out empty; leaf edges
for the tokens are added later by rules such as ``LeafInitRule``.
:type tokens: list
:param tokens: The sentence that this chart will be used to parse.
"""
# Record the sentence token and the sentence length.
self._tokens = tuple(tokens)
self._num_leaves = len(self._tokens)
# Initialise the chart.
self.initialize()
def initialize(self):
"""
Clear the chart.
"""
# A list of edges contained in this chart.
self._edges = []
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
#////////////////////////////////////////////////////////////
# Sentence Access
#////////////////////////////////////////////////////////////
def num_leaves(self):
"""
Return the number of words in this chart's sentence.
:rtype: int
"""
return self._num_leaves
def leaf(self, index):
"""
Return the leaf value of the word at the given index.
:rtype: str
"""
return self._tokens[index]
def leaves(self):
"""
Return a list of the leaf values of each word in the
chart's sentence.
:rtype: list(str)
"""
return self._tokens
#////////////////////////////////////////////////////////////
# Edge access
#////////////////////////////////////////////////////////////
def edges(self):
"""
Return a list of all edges in this chart. New edges
that are added to the chart after the call to edges()
will *not* be contained in this list.
:rtype: list(EdgeI)
:see: ``iteredges``, ``select``
"""
return self._edges[:]
def iteredges(self):
"""
Return an iterator over the edges in this chart. It is
not guaranteed that new edges which are added to the
chart before the iterator is exhausted will also be generated.
:rtype: iter(EdgeI)
:see: ``edges``, ``select``
"""
return iter(self._edges)
# Iterating over the chart yields its edges.
__iter__ = iteredges
def num_edges(self):
"""
Return the number of edges contained in this chart.
:rtype: int
"""
return len(self._edge_to_cpls)
def select(self, **restrictions):
"""
Return an iterator over the edges in this chart. Any
new edges that are added to the chart before the iterator
is exhausted will also be generated. ``restrictions``
can be used to restrict the set of edges that will be
generated.
:param span: Only generate edges ``e`` where ``e.span()==span``
:param start: Only generate edges ``e`` where ``e.start()==start``
:param end: Only generate edges ``e`` where ``e.end()==end``
:param length: Only generate edges ``e`` where ``e.length()==length``
:param lhs: Only generate edges ``e`` where ``e.lhs()==lhs``
:param rhs: Only generate edges ``e`` where ``e.rhs()==rhs``
:param nextsym: Only generate edges ``e`` where
``e.nextsym()==nextsym``
:param dot: Only generate edges ``e`` where ``e.dot()==dot``
:param is_complete: Only generate edges ``e`` where
``e.is_complete()==is_complete``
:param is_incomplete: Only generate edges ``e`` where
``e.is_incomplete()==is_incomplete``
:rtype: iter(EdgeI)
"""
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys].get(vals, []))
def _add_index(self, restr_keys):
"""
A helper function for ``select``, which creates a new index for
a given set of attributes (aka restriction keys).
"""
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = {}
# Add all existing edges to the index.
for edge in self._edges:
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
"""
A helper function for ``insert``, which registers the new
edge with all existing indexes.
"""
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Edge Insertion
#////////////////////////////////////////////////////////////
def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
"""
Add a new edge to the chart, using a pointer to the previous edge.
"""
cpls = self.child_pointer_lists(previous_edge)
new_cpls = [cpl+(child_edge,) for cpl in cpls]
return self.insert(new_edge, *new_cpls)
def insert(self, edge, *child_pointer_lists):
"""
Add a new edge to the chart, and return True if this operation
modified the chart. In particular, return true iff the chart
did not already contain ``edge``, or if it did not already associate
``child_pointer_lists`` with ``edge``.
:type edge: EdgeI
:param edge: The new edge
:type child_pointer_lists: sequence of tuple(EdgeI)
:param child_pointer_lists: A sequence of lists of the edges that
were used to form this edge. This list is used to reconstruct
the trees (or partial trees) that are associated with ``edge``.
:rtype: bool
"""
# Is it a new edge?
if edge not in self._edge_to_cpls:
# Add it to the list of edges.
self._append_edge(edge)
# Register with indexes.
self._register_with_indexes(edge)
# Get the set of child pointer lists for this edge.
cpls = self._edge_to_cpls.setdefault(edge, OrderedDict())
chart_was_modified = False
for child_pointer_list in child_pointer_lists:
child_pointer_list = tuple(child_pointer_list)
if child_pointer_list not in cpls:
# It's a new CPL; register it, and return true.
cpls[child_pointer_list] = True
chart_was_modified = True
return chart_was_modified
def _append_edge(self, edge):
self._edges.append(edge)
#////////////////////////////////////////////////////////////
# Tree extraction & child pointer lists
#////////////////////////////////////////////////////////////
def parses(self, root, tree_class=Tree):
"""
Return an iterator of the complete tree structures that span
the entire chart, and whose root node is ``root``.
"""
for edge in self.select(start=0, end=self._num_leaves, lhs=root):
for tree in self.trees(edge, tree_class=tree_class, complete=True):
yield tree
def trees(self, edge, tree_class=Tree, complete=False):
"""
Return an iterator of the tree structures that are associated
with ``edge``.
If ``edge`` is incomplete, then the unexpanded children will be
encoded as childless subtrees, whose node value is the
corresponding terminal or nonterminal.
:rtype: list(Tree)
:note: If two trees share a common subtree, then the same
Tree may be used to encode that subtree in
both trees. If you need to eliminate this subtree
sharing, then create a deep copy of each tree.
"""
return iter(self._trees(edge, complete, memo={}, tree_class=tree_class))
def _trees(self, edge, complete, memo, tree_class):
"""
A helper function for ``trees``.
:param memo: A dictionary used to record the trees that we've
generated for each edge, so that when we see an edge more
than once, we can reuse the same trees.
"""
# If we've seen this edge before, then reuse our old answer.
if edge in memo:
return memo[edge]
# when we're reading trees off the chart, don't use incomplete edges
if complete and edge.is_incomplete():
return []
# Leaf edges.
if isinstance(edge, LeafEdge):
leaf = self._tokens[edge.start()]
memo[edge] = [leaf]
return [leaf]
# Until we're done computing the trees for edge, set
# memo[edge] to be empty. This has the effect of filtering
# out any cyclic trees (i.e., trees that contain themselves as
# descendants), because if we reach this edge via a cycle,
# then it will appear that the edge doesn't generate any trees.
memo[edge] = []
trees = []
lhs = edge.lhs().symbol()
# Each child pointer list can be used to form trees.
for cpl in self.child_pointer_lists(edge):
# Get the set of child choices for each child pointer.
# child_choices[i] is the set of choices for the tree's
# ith child.
child_choices = [self._trees(cp, complete, memo, tree_class)
for cp in cpl]
# For each combination of children, add a tree.
for children in itertools.product(*child_choices):
trees.append(tree_class(lhs, children))
# If the edge is incomplete, then extend it with "partial trees":
if edge.is_incomplete():
unexpanded = [tree_class(elt,[])
for elt in edge.rhs()[edge.dot():]]
for tree in trees:
tree.extend(unexpanded)
# Update the memoization dictionary.
memo[edge] = trees
# Return the list of trees.
return trees
def child_pointer_lists(self, edge):
"""
Return the set of child pointer lists for the given edge.
Each child pointer list is a list of edges that have
been used to form this edge.
:rtype: list(list(EdgeI))
"""
# Return a copy (as a list), so that callers cannot accidentally
# modify the chart's internal bookkeeping.
return list(self._edge_to_cpls.get(edge, {}).keys())
#////////////////////////////////////////////////////////////
# Display
#////////////////////////////////////////////////////////////
def pretty_format_edge(self, edge, width=None):
"""
Return a pretty-printed string representation of a given edge
in this chart.
:rtype: str
:param width: The number of characters allotted to each
index in the sentence.
"""
if width is None: width = 50 // (self.num_leaves()+1)
(start, end) = (edge.start(), edge.end())
str = '|' + ('.'+' '*(width-1))*start
# Zero-width edges are "#" if complete, ">" if incomplete
if start == end:
if edge.is_complete(): str += '#'
else: str += '>'
# Spanning complete edges are "[===]"; Other edges are
# "[---]" if complete, "[--->" if incomplete
elif edge.is_complete() and edge.span() == (0,self._num_leaves):
str += '['+('='*width)*(end-start-1) + '='*(width-1)+']'
elif edge.is_complete():
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+']'
else:
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+'>'
str += (' '*(width-1)+'.')*(self._num_leaves-end)
return str + '| %s' % edge
def pretty_format_leaves(self, width=None):
"""
Return a pretty-printed string representation of this
chart's leaves. This string can be used as a header
for calls to ``pretty_format_edge``.
"""
if width is None: width = 50 // (self.num_leaves()+1)
if self._tokens is not None and width>1:
header = '|.'
for tok in self._tokens:
header += tok[:width-1].center(width-1)+'.'
header += '|'
else:
header = ''
return header
def pretty_format(self, width=None):
"""
Return a pretty-printed string representation of this chart.
:param width: The number of characters allotted to each
index in the sentence.
:rtype: str
"""
if width is None: width = 50 // (self.num_leaves()+1)
# sort edges: primary key=length, secondary key=start index.
# (and filter out the token edges)
edges = sorted([(e.length(), e.start(), e) for e in self])
edges = [e for (_,_,e) in edges]
return (self.pretty_format_leaves(width) + '\n' +
'\n'.join(self.pretty_format_edge(edge, width) for edge in edges))
#////////////////////////////////////////////////////////////
# Display: Dot (AT&T Graphviz)
#////////////////////////////////////////////////////////////
def dot_digraph(self):
# Header
s = 'digraph nltk_chart {\n'
#s += ' size="5,5";\n'
s += ' rankdir=LR;\n'
s += ' node [height=0.1,width=0.1];\n'
s += ' node [style=filled, color="lightgray"];\n'
# Set up the nodes
for y in range(self.num_edges(), -1, -1):
if y == 0:
s += ' node [style=filled, color="black"];\n'
for x in range(self.num_leaves()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d [label=""];\n' % (x,y)
# Add a spacer
s += ' x [style=invis]; x->0000.0000 [style=invis];\n'
# Declare ranks.
for x in range(self.num_leaves()+1):
s += ' {rank=same;'
for y in range(self.num_edges()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d' % (x,y)
s += '}\n'
# Add the leaves
s += ' edge [style=invis, weight=100];\n'
s += ' node [shape=plaintext]\n'
s += ' 0000.0000'
for x in range(self.num_leaves()):
s += '->%s->%04d.0000' % (self.leaf(x), x+1)
s += ';\n\n'
# Add the edges
s += ' edge [style=solid, weight=1];\n'
for y, edge in enumerate(self):
for x in range(edge.start()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' %
(edge.start(), y+1, edge.end(), y+1, edge))
for x in range(edge.end(), self.num_leaves()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += '}\n'
return s
########################################################################
## Chart Rules
########################################################################
class ChartRuleI(object):
"""
A rule that specifies what new edges are licensed by any given set
of existing edges. Each chart rule expects a fixed number of
edges, as indicated by the class variable ``NUM_EDGES``. In
particular:
- A chart rule with ``NUM_EDGES=0`` specifies what new edges are
licensed, regardless of existing edges.
- A chart rule with ``NUM_EDGES=1`` specifies what new edges are
licensed by a single existing edge.
- A chart rule with ``NUM_EDGES=2`` specifies what new edges are
licensed by a pair of existing edges.
:type NUM_EDGES: int
:cvar NUM_EDGES: The number of existing edges that this rule uses
to license new edges. Typically, this number ranges from zero
to two.
"""
def apply(self, chart, grammar, *edges):
"""
Return a generator that will add edges licensed by this rule
and the given edges to the chart, one at a time. Each
time the generator is resumed, it will either add a new
edge and yield that edge; or return.
:type edges: list(EdgeI)
:param edges: A set of existing edges. The number of edges
that should be passed to ``apply()`` is specified by the
``NUM_EDGES`` class variable.
:rtype: iter(EdgeI)
"""
raise NotImplementedError()
def apply_everywhere(self, chart, grammar):
"""
Return a generator that will add all edges licensed by
this rule, given the edges that are currently in the
chart, one at a time. Each time the generator is resumed,
it will either add a new edge and yield that edge; or return.
:rtype: iter(EdgeI)
"""
raise NotImplementedError()
@python_2_unicode_compatible
class AbstractChartRule(ChartRuleI):
"""
An abstract base class for chart rules. ``AbstractChartRule``
provides:
- A default implementation for ``apply_everywhere``
(currently, this implementation assumes that ``NUM_EDGES`` <= 3).
- A default implementation for ``__str__``, which returns a
name based on the rule's class name.
"""
# Subclasses must define apply.
def apply(self, chart, grammar, *edges):
raise NotImplementedError()
# Default: loop through the given number of edges, and call
# self.apply() for each set of edges.
def apply_everywhere(self, chart, grammar):
if self.NUM_EDGES == 0:
for new_edge in self.apply(chart, grammar):
yield new_edge
elif self.NUM_EDGES == 1:
for e1 in chart:
for new_edge in self.apply(chart, grammar, e1):
yield new_edge
elif self.NUM_EDGES == 2:
for e1 in chart:
for e2 in chart:
for new_edge in self.apply(chart, grammar, e1, e2):
yield new_edge
elif self.NUM_EDGES == 3:
for e1 in chart:
for e2 in chart:
for e3 in chart:
for new_edge in self.apply(chart,grammar,e1,e2,e3):
yield new_edge
else:
raise AssertionError('NUM_EDGES>3 is not currently supported')
# Default: return a name based on the class name.
def __str__(self):
# Add spaces between InitialCapsWords.
return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__)
#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////
class FundamentalRule(AbstractChartRule):
"""
A rule that joins two adjacent edges to form a single combined
edge. In particular, this rule specifies that any pair of edges
- ``[A -> alpha \* B beta][i:j]``
- ``[B -> gamma \*][j:k]``
licenses the edge:
- ``[A -> alpha B \* beta][i:k]``
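A doctest-style sketch on a hand-built chart (the ``NP``/``Det``/``N``
edges are made up for illustration; the ``grammar`` argument is unused
by this rule, so ``None`` is passed):

    >>> from nltk.grammar import Nonterminal
    >>> NP, Det, N = Nonterminal('NP'), Nonterminal('Det'), Nonterminal('N')
    >>> chart = Chart(['the', 'dog'])
    >>> left = TreeEdge((0, 1), NP, (Det, N), dot=1)     # [NP -> Det * N][0:1]
    >>> right = TreeEdge((1, 2), N, ('dog',), dot=1)     # [N -> 'dog' *][1:2]
    >>> chart.insert(left, ()) and chart.insert(right, ())
    True
    >>> for e in FundamentalRule().apply(chart, None, left, right):
    ...     print(e)
    [0:2] NP -> Det N *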
"""
NUM_EDGES = 2
def apply(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.is_incomplete() and
right_edge.is_complete() and
left_edge.end() == right_edge.start() and
left_edge.nextsym() == right_edge.lhs()):
return
# Construct the new edge.
new_edge = left_edge.move_dot_forward(right_edge.end())
# Insert it into the chart.
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class SingleEdgeFundamentalRule(FundamentalRule):
"""
A rule that joins a given edge with adjacent edges in the chart,
to form combined edges. In particular, this rule specifies that
either of the edges:
- ``[A -> alpha \* B beta][i:j]``
- ``[B -> gamma \*][j:k]``
licenses the edge:
- ``[A -> alpha B \* beta][i:k]``
if the other edge is already in the chart.
:note: This is basically ``FundamentalRule``, with one edge left
unspecified.
"""
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete():
for new_edge in self._apply_incomplete(chart, grammar, edge):
yield new_edge
else:
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
def _apply_complete(self, chart, grammar, right_edge):
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
nextsym=right_edge.lhs()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.nextsym()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Inserting Terminal Leaves
#////////////////////////////////////////////////////////////
class LeafInitRule(AbstractChartRule):
NUM_EDGES=0
def apply(self, chart, grammar):
for index in range(chart.num_leaves()):
new_edge = LeafEdge(chart.leaf(index), index)
if chart.insert(new_edge, ()):
yield new_edge
#////////////////////////////////////////////////////////////
# Top-Down Prediction
#////////////////////////////////////////////////////////////
class TopDownInitRule(AbstractChartRule):
"""
A rule licensing edges corresponding to the grammar productions for
the grammar's start symbol. In particular, this rule specifies that
``[S -> \* alpha][0:0]`` is licensed for each grammar production
``S -> alpha``, where ``S`` is the grammar's start symbol.
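A doctest-style sketch using the ``demo_grammar`` defined at the end
of this module (whose start symbol is ``S``, with the single
production ``S -> NP VP``):

    >>> chart = Chart('I saw John'.split())
    >>> for e in TopDownInitRule().apply(chart, demo_grammar()):
    ...     print(e)
    [0:0] S  -> * NP VP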
"""
NUM_EDGES = 0
def apply(self, chart, grammar):
for prod in grammar.productions(lhs=grammar.start()):
new_edge = TreeEdge.from_production(prod, 0)
if chart.insert(new_edge, ()):
yield new_edge
class TopDownPredictRule(AbstractChartRule):
"""
A rule licensing edges corresponding to the grammar productions
for the nonterminal following an incomplete edge's dot. In
particular, this rule specifies that
``[A -> alpha \* B beta][i:j]`` licenses the edge
``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``.
:note: This rule corresponds to the Predictor Rule in Earley parsing.
"""
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_complete(): return
for prod in grammar.productions(lhs=edge.nextsym()):
new_edge = TreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
class CachedTopDownPredictRule(TopDownPredictRule):
"""
A cached version of ``TopDownPredictRule``. After the first time
this rule is applied to an edge with a given ``end`` and ``nextsym``,
it will not generate any more edges for edges with that ``end`` and
``nextsym``.
If ``chart`` or ``grammar`` are changed, then the cache is flushed.
"""
def __init__(self):
TopDownPredictRule.__init__(self)
self._done = {}
def apply(self, chart, grammar, edge):
if edge.is_complete(): return
nextsym, index = edge.nextsym(), edge.end()
if not is_nonterminal(nextsym): return
# If we've already applied this rule to an edge with the same
# next & end, and the chart & grammar have not changed, then
# just return (no new edges to add).
done = self._done.get((nextsym, index), (None,None))
if done[0] is chart and done[1] is grammar: return
# Add all the edges indicated by the top down expand rule.
for prod in grammar.productions(lhs=nextsym):
# If the left corner in the predicted production is a
# leaf, it must match the input.
if prod.rhs():
first = prod.rhs()[0]
if is_terminal(first):
if index >= chart.num_leaves() or first != chart.leaf(index): continue
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
# Record the fact that we've applied this rule.
self._done[nextsym, index] = (chart, grammar)
#////////////////////////////////////////////////////////////
# Bottom-Up Prediction
#////////////////////////////////////////////////////////////
class BottomUpPredictRule(AbstractChartRule):
"""
A rule licensing any edge corresponding to a production whose
right-hand side begins with a complete edge's left-hand side. In
particular, this rule specifies that ``[A -> alpha \*]`` licenses
the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``.
"""
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge.from_production(prod, edge.start())
if chart.insert(new_edge, ()):
yield new_edge
class BottomUpPredictCombineRule(BottomUpPredictRule):
"""
A rule licensing any edge corresponding to a production whose
right-hand side begins with a complete edge's left-hand side. In
particular, this rule specifies that ``[A -> alpha \*]``
licenses the edge ``[B -> A \* beta]`` for each grammar
production ``B -> A beta``.
:note: This is like ``BottomUpPredictRule``, but it also applies
the ``FundamentalRule`` to the resulting edge.
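A doctest-style sketch (the tiny grammar and the ``Det`` edge are
made up for illustration):

    >>> from nltk.grammar import CFG, Nonterminal
    >>> grammar = CFG.fromstring("NP -> Det N\nDet -> 'the'\nN -> 'dog'")
    >>> chart = Chart(['the', 'dog'])
    >>> det_edge = TreeEdge((0, 1), Nonterminal('Det'), ('the',), dot=1)
    >>> chart.insert(det_edge, ())
    True
    >>> for e in BottomUpPredictCombineRule().apply(chart, grammar, det_edge):
    ...     print(e)
    [0:1] NP -> Det * N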
"""
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
class EmptyPredictRule(AbstractChartRule):
"""
A rule that inserts all empty productions as passive edges,
in every position in the chart.
"""
NUM_EDGES = 0
def apply(self, chart, grammar):
for prod in grammar.productions(empty=True):
for index in compat.xrange(chart.num_leaves() + 1):
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
########################################################################
## Filtered Bottom Up
########################################################################
class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
def _apply_complete(self, chart, grammar, right_edge):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
nextsym=right_edge.lhs()):
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.nextsym()):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule):
def apply(self, chart, grammar, edge):
if edge.is_incomplete():
return
end = edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for prod in grammar.productions(rhs=edge.lhs()):
if _bottomup_filter(grammar, nexttoken, prod.rhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
def _bottomup_filter(grammar, nexttoken, rhs, dot=0):
if len(rhs) <= dot + 1:
return True
_next = rhs[dot + 1]
if is_terminal(_next):
return nexttoken == _next
else:
return grammar.is_leftcorner(_next, nexttoken)
########################################################################
## Generic Chart Parser
########################################################################
TD_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
SingleEdgeFundamentalRule()]
BU_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
SingleEdgeFundamentalRule()]
BU_LC_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
SingleEdgeFundamentalRule()]
LC_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredSingleEdgeFundamentalRule()]
class ChartParser(ParserI):
"""
A generic chart parser. A "strategy", or list of
``ChartRuleI`` instances, is used to decide what edges to add to
the chart. In particular, ``ChartParser`` uses the following
algorithm to parse texts:
| Until no new edges are added:
| For each *rule* in *strategy*:
| Apply *rule* to any applicable edges in the chart.
| Return any complete parses in the chart
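A doctest-style usage sketch, parsing a short sentence with the
``demo_grammar`` defined at the end of this module and the default
bottom-up left-corner strategy:

    >>> parser = ChartParser(demo_grammar())
    >>> for tree in parser.parse('I saw John'.split()):
    ...     print(tree)
    (S (NP I) (VP (Verb saw) (NP John)))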
"""
def __init__(self, grammar, strategy=BU_LC_STRATEGY, trace=0,
trace_chart_width=50, use_agenda=True, chart_class=Chart):
"""
Create a new chart parser, that uses ``grammar`` to parse
texts.
:type grammar: CFG
:param grammar: The grammar used to parse texts.
:type strategy: list(ChartRuleI)
:param strategy: A list of rules that should be used to decide
what edges to add to the chart (bottom-up left-corner strategy by default).
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
:type trace_chart_width: int
:param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
:type use_agenda: bool
:param use_agenda: Use an optimized agenda-based algorithm,
if possible.
:param chart_class: The class that should be used to create
the parse charts.
"""
self._grammar = grammar
self._strategy = strategy
self._trace = trace
self._trace_chart_width = trace_chart_width
# If the strategy only consists of axioms (NUM_EDGES==0) and
# inference rules (NUM_EDGES==1), we can use an agenda-based algorithm:
self._use_agenda = use_agenda
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
self._use_agenda = False
def grammar(self):
return self._grammar
def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width):
if not trace: return
print_rule_header = trace > 1
for edge in new_edges:
if print_rule_header:
print('%s:' % rule)
print_rule_header = False
print(chart.pretty_format_edge(edge, edge_width))
def chart_parse(self, tokens, trace=None):
"""
Return the final parse ``Chart`` from which all possible
parse trees can be extracted.
:param tokens: The sentence to be parsed
:type tokens: list(str)
:rtype: Chart
"""
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
if trace: print(chart.pretty_format_leaves(trace_edge_width))
if self._use_agenda:
# Use an agenda-based algorithm.
for axiom in self._axioms:
new_edges = list(axiom.apply(chart, grammar))
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
agenda = chart.edges()
# We reverse the initial agenda, since it is a stack
# but chart.edges() functions as a queue.
agenda.reverse()
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = list(rule.apply(chart, grammar, edge))
if trace:
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
agenda += new_edges
else:
# Do not use an agenda-based algorithm.
edges_added = True
while edges_added:
edges_added = False
for rule in self._strategy:
new_edges = list(rule.apply_everywhere(chart, grammar))
edges_added = len(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
# Return the final chart.
return chart
def parse(self, tokens, tree_class=Tree):
chart = self.chart_parse(tokens)
return iter(chart.parses(self._grammar.start(), tree_class=tree_class))
class TopDownChartParser(ChartParser):
"""
A ``ChartParser`` using a top-down parsing strategy.
See ``ChartParser`` for more information.
"""
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args)
class BottomUpChartParser(ChartParser):
"""
A ``ChartParser`` using a bottom-up parsing strategy.
See ``ChartParser`` for more information.
"""
def __init__(self, grammar, **parser_args):
if isinstance(grammar, PCFG):
warnings.warn("BottomUpChartParser only works for CFG, "
"use BottomUpProbabilisticChartParser instead",
category=DeprecationWarning)
ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
class BottomUpLeftCornerChartParser(ChartParser):
"""
A ``ChartParser`` using a bottom-up left-corner parsing strategy.
This strategy is often more efficient than standard bottom-up.
See ``ChartParser`` for more information.
"""
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args)
class LeftCornerChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("LeftCornerParser only works for grammars "
"without empty productions.")
ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args)
########################################################################
## Stepping Chart Parser
########################################################################
class SteppingChartParser(ChartParser):
"""
A ``ChartParser`` that allows you to step through the parsing
process, adding a single edge at a time. It also allows you to
change the parser's strategy or grammar midway through parsing a
text.
The ``initialize`` method is used to start parsing a text. ``step``
adds a single edge to the chart. ``set_strategy`` changes the
strategy used by the chart parser. ``parses`` returns the set of
parses that has been found by the chart parser.
:ivar _restart: Records whether the parser's strategy, grammar,
or chart has been changed. If so, then ``step`` must restart
the parsing algorithm.
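A doctest-style sketch: step through a bottom-up left-corner parse of
a short sentence with the ``demo_grammar`` defined at the end of this
module, then read off the single parse:

    >>> sp = SteppingChartParser(demo_grammar(), BU_LC_STRATEGY)
    >>> sp.initialize('I saw John'.split())
    >>> for e in sp.step():
    ...     if e is None:
    ...         break
    >>> print(next(sp.parses()))
    (S (NP I) (VP (Verb saw) (NP John)))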
"""
def __init__(self, grammar, strategy=[], trace=0):
self._chart = None
self._current_chartrule = None
self._restart = False
ChartParser.__init__(self, grammar, strategy, trace)
#////////////////////////////////////////////////////////////
# Initialization
#////////////////////////////////////////////////////////////
def initialize(self, tokens):
"Begin parsing the given tokens."
self._chart = Chart(list(tokens))
self._restart = True
#////////////////////////////////////////////////////////////
# Stepping
#////////////////////////////////////////////////////////////
def step(self):
"""
Return a generator that adds edges to the chart, one at a
time. Each time the generator is resumed, it adds a single
edge and yields that edge. If no more edges can be added,
then it yields None.
If the parser's strategy, grammar, or chart is changed, then
the generator will continue adding edges using the new
strategy, grammar, or chart.
Note that this generator never terminates, since the grammar
or strategy might be changed to values that would add new
edges. Instead, it yields None when no more edges can be
added with the current strategy and grammar.
"""
if self._chart is None:
raise ValueError('Parser must be initialized first')
while True:
self._restart = False
w = 50 // (self._chart.num_leaves()+1)
for e in self._parse():
if self._trace > 1: print(self._current_chartrule)
if self._trace > 0: print(self._chart.pretty_format_edge(e,w))
yield e
if self._restart: break
else:
yield None # No more edges.
def _parse(self):
"""
A generator that implements the actual parsing algorithm.
``step`` iterates through this generator, and restarts it
whenever the parser's strategy, grammar, or chart is modified.
"""
chart = self._chart
grammar = self._grammar
edges_added = 1
while edges_added > 0:
edges_added = 0
for rule in self._strategy:
self._current_chartrule = rule
for e in rule.apply_everywhere(chart, grammar):
edges_added += 1
yield e
#////////////////////////////////////////////////////////////
# Accessors
#////////////////////////////////////////////////////////////
def strategy(self):
"Return the strategy used by this parser."
return self._strategy
def grammar(self):
"Return the grammar used by this parser."
return self._grammar
def chart(self):
"Return the chart that is used by this parser."
return self._chart
def current_chartrule(self):
"Return the chart rule used to generate the most recent edge."
return self._current_chartrule
def parses(self, tree_class=Tree):
"Return the parse trees currently contained in the chart."
return self._chart.parses(self._grammar.start(), tree_class)
#////////////////////////////////////////////////////////////
# Parser modification
#////////////////////////////////////////////////////////////
def set_strategy(self, strategy):
"""
Change the strategy that the parser uses to decide which edges
to add to the chart.
:type strategy: list(ChartRuleI)
:param strategy: A list of rules that should be used to decide
what edges to add to the chart.
"""
if strategy == self._strategy: return
self._strategy = strategy[:] # Make a copy.
self._restart = True
def set_grammar(self, grammar):
"Change the grammar used by the parser."
if grammar is self._grammar: return
self._grammar = grammar
self._restart = True
def set_chart(self, chart):
"Load a given chart into the chart parser."
if chart is self._chart: return
self._chart = chart
self._restart = True
#////////////////////////////////////////////////////////////
# Standard parser methods
#////////////////////////////////////////////////////////////
def parse(self, tokens, tree_class=Tree):
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# Initialize ourselves.
self.initialize(tokens)
# Step until no more edges are generated.
for e in self.step():
if e is None: break
# Return an iterator of complete parses.
return self.parses(tree_class=tree_class)
########################################################################
## Demo Code
########################################################################
def demo_grammar():
from nltk.grammar import CFG
return CFG.fromstring("""
S -> NP VP
PP -> "with" NP
NP -> NP PP
VP -> VP PP
VP -> Verb NP
VP -> Verb
NP -> Det Noun
NP -> "John"
NP -> "I"
Det -> "the"
Det -> "my"
Det -> "a"
Noun -> "dog"
Noun -> "cookie"
Verb -> "ate"
Verb -> "saw"
Prep -> "with"
Prep -> "under"
""")
def demo(choice=None,
print_times=True, print_grammar=False,
print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the chart parsers.
"""
import sys, time
from nltk import nonterminals, Production, CFG
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if print_grammar:
print("* Grammar")
print(grammar)
# Tokenize the sample sentence.
print("* Sentence:")
print(sent)
tokens = sent.split()
print(tokens)
print()
# Ask the user which parser to test,
# if the parser wasn't provided as an argument
if choice is None:
print(' 1: Top-down chart parser')
print(' 2: Bottom-up chart parser')
print(' 3: Bottom-up left-corner chart parser')
print(' 4: Left-corner chart parser with bottom-up filter')
print(' 5: Stepping chart parser (alternating top-down & bottom-up)')
print(' 6: All parsers')
print('\nWhich parser (1-6)? ', end=' ')
choice = sys.stdin.readline().strip()
print()
choice = str(choice)
if choice not in "123456":
print('Bad parser number')
return
# Keep track of how long each parser takes.
times = {}
strategies = {'1': ('Top-down', TD_STRATEGY),
'2': ('Bottom-up', BU_STRATEGY),
'3': ('Bottom-up left-corner', BU_LC_STRATEGY),
'4': ('Filtered left-corner', LC_STRATEGY)}
choices = []
if choice in strategies: choices = [choice]
if choice=='6': choices = "1234"
# Run the requested chart parser(s), except the stepping parser.
for strategy in choices:
print("* Strategy: " + strategies[strategy][0])
print()
cp = ChartParser(grammar, strategies[strategy][1], trace=trace)
t = time.time()
chart = cp.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
times[strategies[strategy][0]] = time.time()-t
print("Nr edges in chart:", len(chart.edges()))
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if print_trees:
for tree in parses: print(tree)
else:
print("Nr trees:", len(parses))
print()
# Run the stepping parser, if requested.
if choice in "56":
print("* Strategy: Stepping (top-down vs bottom-up)")
print()
t = time.time()
cp = SteppingChartParser(grammar, trace=trace)
cp.initialize(tokens)
for i in range(5):
print('*** SWITCH TO TOP DOWN')
cp.set_strategy(TD_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
print('*** SWITCH TO BOTTOM UP')
cp.set_strategy(BU_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
times['Stepping'] = time.time()-t
print("Nr edges in chart:", len(cp.chart().edges()))
if numparses:
assert len(list(cp.parses()))==numparses, 'Not all parses found'
if print_trees:
for tree in cp.parses(): print(tree)
else:
print("Nr trees:", len(list(cp.parses())))
print()
# Print the times of all parsers:
if not (print_times and times): return
print("* Parsing times")
print()
maxlen = max(len(key) for key in times)
format = '%' + repr(maxlen) + 's parser: %6.3fsec'
times_items = times.items()
for (parser, t) in sorted(times_items, key=lambda a:a[1]):
print(format % (parser, t))
if __name__ == '__main__':
demo()
from __future__ import print_function, division, unicode_literals
import itertools
import re
import warnings
from nltk import compat
from nltk.tree import Tree
from nltk.grammar import PCFG, is_nonterminal, is_terminal
from nltk.util import OrderedDict
from nltk.internals import raise_unorderable_types
from nltk.compat import (total_ordering, python_2_unicode_compatible,
unicode_repr)
from nltk.parse.api import ParserI
@total_ordering
class EdgeI(object):
def __init__(self):
if self.__class__ == EdgeI:
raise TypeError('Edge is an abstract interface')
def span(self):
raise NotImplementedError()
def start(self):
raise NotImplementedError()
def end(self):
raise NotImplementedError()
def length(self):
raise NotImplementedError()
def lhs(self):
raise NotImplementedError()
def rhs(self):
raise NotImplementedError()
def dot(self):
raise NotImplementedError()
def nextsym(self):
raise NotImplementedError()
def is_complete(self):
raise NotImplementedError()
def is_incomplete(self):
raise NotImplementedError()
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self._comparison_key == other._comparison_key)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, EdgeI):
raise_unorderable_types("<", self, other)
if self.__class__ is other.__class__:
return self._comparison_key < other._comparison_key
else:
return self.__class__.__name__ < other.__class__.__name__
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(self._comparison_key)
return self._hash
@python_2_unicode_compatible
class TreeEdge(EdgeI):
def __init__(self, span, lhs, rhs, dot=0):
self._span = span
self._lhs = lhs
rhs = tuple(rhs)
self._rhs = rhs
self._dot = dot
self._comparison_key = (span, lhs, rhs, dot)
@staticmethod
def from_production(production, index):
return TreeEdge(span=(index, index), lhs=production.lhs(),
rhs=production.rhs(), dot=0)
def move_dot_forward(self, new_end):
return TreeEdge(span=(self._span[0], new_end),
lhs=self._lhs, rhs=self._rhs,
dot=self._dot+1)
def lhs(self): return self._lhs
def span(self): return self._span
def start(self): return self._span[0]
def end(self): return self._span[1]
def length(self): return self._span[1] - self._span[0]
def rhs(self): return self._rhs
def dot(self): return self._dot
def is_complete(self): return self._dot == len(self._rhs)
def is_incomplete(self): return self._dot != len(self._rhs)
def nextsym(self):
if self._dot >= len(self._rhs): return None
else: return self._rhs[self._dot]
def __str__(self):
str = '[%s:%s] ' % (self._span[0], self._span[1])
str += '%-2r ->' % (self._lhs,)
for i in range(len(self._rhs)):
if i == self._dot: str += ' *'
str += ' %s' % unicode_repr(self._rhs[i])
if len(self._rhs) == self._dot: str += ' *'
return str
def __repr__(self):
return '[Edge: %s]' % self
@python_2_unicode_compatible
class LeafEdge(EdgeI):
def __init__(self, leaf, index):
self._leaf = leaf
self._index = index
self._comparison_key = (leaf, index)
def lhs(self): return self._leaf
def span(self): return (self._index, self._index+1)
def start(self): return self._index
def end(self): return self._index+1
def length(self): return 1
def rhs(self): return ()
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
def nextsym(self): return None
def __str__(self):
return '[%s:%s] %s' % (self._index, self._index+1, unicode_repr(self._leaf))
def __repr__(self):
return '[Edge: %s]' % (self)
class Chart(object):
def __init__(self, tokens):
self._tokens = tuple(tokens)
self._num_leaves = len(self._tokens)
self.initialize()
def initialize(self):
self._edges = []
self._edge_to_cpls = {}
self._indexes = {}
def num_leaves(self):
return self._num_leaves
def leaf(self, index):
return self._tokens[index]
def leaves(self):
return self._tokens
def edges(self):
return self._edges[:]
def iteredges(self):
return iter(self._edges)
__iter__ = iteredges
def num_edges(self):
return len(self._edge_to_cpls)
def select(self, **restrictions):
if restrictions=={}: return iter(self._edges)
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
index = self._indexes[restr_keys] = {}
for edge in self._edges:
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
cpls = self.child_pointer_lists(previous_edge)
new_cpls = [cpl+(child_edge,) for cpl in cpls]
return self.insert(new_edge, *new_cpls)
def insert(self, edge, *child_pointer_lists):
if edge not in self._edge_to_cpls:
self._append_edge(edge)
self._register_with_indexes(edge)
cpls = self._edge_to_cpls.setdefault(edge, OrderedDict())
chart_was_modified = False
for child_pointer_list in child_pointer_lists:
child_pointer_list = tuple(child_pointer_list)
if child_pointer_list not in cpls:
cpls[child_pointer_list] = True
chart_was_modified = True
return chart_was_modified
def _append_edge(self, edge):
self._edges.append(edge)
#////////////////////////////////////////////////////////////
# Tree extraction & child pointer lists
#////////////////////////////////////////////////////////////
def parses(self, root, tree_class=Tree):
for edge in self.select(start=0, end=self._num_leaves, lhs=root):
for tree in self.trees(edge, tree_class=tree_class, complete=True):
yield tree
def trees(self, edge, tree_class=Tree, complete=False):
return iter(self._trees(edge, complete, memo={}, tree_class=tree_class))
def _trees(self, edge, complete, memo, tree_class):
# If we've seen this edge before, then reuse our old answer.
if edge in memo:
return memo[edge]
if complete and edge.is_incomplete():
return []
if isinstance(edge, LeafEdge):
leaf = self._tokens[edge.start()]
memo[edge] = [leaf]
return [leaf]
# memo[edge] to be empty. This has the effect of filtering
# out any cyclic trees (i.e., trees that contain themselves as
# descendants), because if we reach this edge via a cycle,
# then it will appear that the edge doesn't generate any trees.
memo[edge] = []
trees = []
lhs = edge.lhs().symbol()
for cpl in self.child_pointer_lists(edge):
# ith child.
child_choices = [self._trees(cp, complete, memo, tree_class)
for cp in cpl]
# For each combination of children, add a tree.
for children in itertools.product(*child_choices):
trees.append(tree_class(lhs, children))
# If the edge is incomplete, then extend it with "partial trees":
if edge.is_incomplete():
unexpanded = [tree_class(elt,[])
for elt in edge.rhs()[edge.dot():]]
for tree in trees:
tree.extend(unexpanded)
# Update the memoization dictionary.
memo[edge] = trees
# Return the list of trees.
return trees
def child_pointer_lists(self, edge):
# Make a copy, in case they modify it.
return self._edge_to_cpls.get(edge, {}).keys()
#////////////////////////////////////////////////////////////
# Display
#////////////////////////////////////////////////////////////
def pretty_format_edge(self, edge, width=None):
if width is None: width = 50 // (self.num_leaves()+1)
(start, end) = (edge.start(), edge.end())
str = '|' + ('.'+' '*(width-1))*start
# Zero-width edges are "#" if complete, ">" if incomplete
if start == end:
if edge.is_complete(): str += ' else: str += '>'
# Spanning complete edges are "[===]"; Other edges are
# "[---]" if complete, "[--->" if incomplete
elif edge.is_complete() and edge.span() == (0,self._num_leaves):
str += '['+('='*width)*(end-start-1) + '='*(width-1)+']'
elif edge.is_complete():
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+']'
else:
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+'>'
str += (' '*(width-1)+'.')*(self._num_leaves-end)
return str + '| %s' % edge
def pretty_format_leaves(self, width=None):
if width is None: width = 50 // (self.num_leaves()+1)
if self._tokens is not None and width>1:
header = '|.'
for tok in self._tokens:
header += tok[:width-1].center(width-1)+'.'
header += '|'
else:
header = ''
return header
def pretty_format(self, width=None):
if width is None: width = 50 // (self.num_leaves()+1)
# sort edges: primary key=length, secondary key=start index.
# (and filter out the token edges)
edges = sorted([(e.length(), e.start(), e) for e in self])
edges = [e for (_,_,e) in edges]
return (self.pretty_format_leaves(width) + '\n' +
'\n'.join(self.pretty_format_edge(edge, width) for edge in edges))
#////////////////////////////////////////////////////////////
# Display: Dot (AT&T Graphviz)
#////////////////////////////////////////////////////////////
def dot_digraph(self):
# Header
s = 'digraph nltk_chart {\n'
#s += ' size="5,5";\n'
s += ' rankdir=LR;\n'
s += ' node [height=0.1,width=0.1];\n'
s += ' node [style=filled, color="lightgray"];\n'
# Set up the nodes
for y in range(self.num_edges(), -1, -1):
if y == 0:
s += ' node [style=filled, color="black"];\n'
for x in range(self.num_leaves()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d [label=""];\n' % (x,y)
# Add a spacer
s += ' x [style=invis]; x->0000.0000 [style=invis];\n'
# Declare ranks.
for x in range(self.num_leaves()+1):
s += ' {rank=same;'
for y in range(self.num_edges()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d' % (x,y)
s += '}\n'
# Add the leaves
s += ' edge [style=invis, weight=100];\n'
s += ' node [shape=plaintext]\n'
s += ' 0000.0000'
for x in range(self.num_leaves()):
s += '->%s->%04d.0000' % (self.leaf(x), x+1)
s += ';\n\n'
# Add the edges
s += ' edge [style=solid, weight=1];\n'
for y, edge in enumerate(self):
for x in range(edge.start()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' %
(edge.start(), y+1, edge.end(), y+1, edge))
for x in range(edge.end(), self.num_leaves()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += '}\n'
return s
########################################################################
## Chart Rules
########################################################################
class ChartRuleI(object):
def apply(self, chart, grammar, *edges):
raise NotImplementedError()
def apply_everywhere(self, chart, grammar):
raise NotImplementedError()
@python_2_unicode_compatible
class AbstractChartRule(ChartRuleI):
# Subclasses must define apply.
def apply(self, chart, grammar, *edges):
raise NotImplementedError()
# Default: loop through the given number of edges, and call
# self.apply() for each set of edges.
def apply_everywhere(self, chart, grammar):
if self.NUM_EDGES == 0:
for new_edge in self.apply(chart, grammar):
yield new_edge
elif self.NUM_EDGES == 1:
for e1 in chart:
for new_edge in self.apply(chart, grammar, e1):
yield new_edge
elif self.NUM_EDGES == 2:
for e1 in chart:
for e2 in chart:
for new_edge in self.apply(chart, grammar, e1, e2):
yield new_edge
elif self.NUM_EDGES == 3:
for e1 in chart:
for e2 in chart:
for e3 in chart:
for new_edge in self.apply(chart,grammar,e1,e2,e3):
yield new_edge
else:
raise AssertionError('NUM_EDGES>3 is not currently supported')
# Default: return a name based on the class name.
def __str__(self):
# Add spaces between InitialCapsWords.
return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__)
#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////
class FundamentalRule(AbstractChartRule):
NUM_EDGES = 2
def apply(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.is_incomplete() and
right_edge.is_complete() and
left_edge.end() == right_edge.start() and
left_edge.nextsym() == right_edge.lhs()):
return
# Construct the new edge.
new_edge = left_edge.move_dot_forward(right_edge.end())
# Insert it into the chart.
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class SingleEdgeFundamentalRule(FundamentalRule):
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete():
for new_edge in self._apply_incomplete(chart, grammar, edge):
yield new_edge
else:
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
def _apply_complete(self, chart, grammar, right_edge):
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
nextsym=right_edge.lhs()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.nextsym()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Inserting Terminal Leafs
#////////////////////////////////////////////////////////////
class LeafInitRule(AbstractChartRule):
NUM_EDGES=0
def apply(self, chart, grammar):
for index in range(chart.num_leaves()):
new_edge = LeafEdge(chart.leaf(index), index)
if chart.insert(new_edge, ()):
yield new_edge
#////////////////////////////////////////////////////////////
# Top-Down Prediction
#////////////////////////////////////////////////////////////
class TopDownInitRule(AbstractChartRule):
NUM_EDGES = 0
def apply(self, chart, grammar):
for prod in grammar.productions(lhs=grammar.start()):
new_edge = TreeEdge.from_production(prod, 0)
if chart.insert(new_edge, ()):
yield new_edge
class TopDownPredictRule(AbstractChartRule):
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_complete(): return
for prod in grammar.productions(lhs=edge.nextsym()):
new_edge = TreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
class CachedTopDownPredictRule(TopDownPredictRule):
def __init__(self):
TopDownPredictRule.__init__(self)
self._done = {}
def apply(self, chart, grammar, edge):
if edge.is_complete(): return
nextsym, index = edge.nextsym(), edge.end()
if not is_nonterminal(nextsym): return
# If we've already applied this rule to an edge with the same
done = self._done.get((nextsym, index), (None,None))
if done[0] is chart and done[1] is grammar: return
for prod in grammar.productions(lhs=nextsym):
if prod.rhs():
first = prod.rhs()[0]
if is_terminal(first):
if index >= chart.num_leaves() or first != chart.leaf(index): continue
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
self._done[nextsym, index] = (chart, grammar)
#////////////////////////////////////////////////////////////
# Bottom-Up Prediction
#////////////////////////////////////////////////////////////
class BottomUpPredictRule(AbstractChartRule):
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge.from_production(prod, edge.start())
if chart.insert(new_edge, ()):
yield new_edge
class BottomUpPredictCombineRule(BottomUpPredictRule):
NUM_EDGES = 1
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
class EmptyPredictRule(AbstractChartRule):
NUM_EDGES = 0
def apply(self, chart, grammar):
for prod in grammar.productions(empty=True):
for index in compat.xrange(chart.num_leaves() + 1):
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
########################################################################
## Filtered Bottom Up
########################################################################
class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
def _apply_complete(self, chart, grammar, right_edge):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
nextsym=right_edge.lhs()):
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.nextsym()):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule):
def apply(self, chart, grammar, edge):
if edge.is_incomplete():
return
end = edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for prod in grammar.productions(rhs=edge.lhs()):
if _bottomup_filter(grammar, nexttoken, prod.rhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
def _bottomup_filter(grammar, nexttoken, rhs, dot=0):
if len(rhs) <= dot + 1:
return True
_next = rhs[dot + 1]
if is_terminal(_next):
return nexttoken == _next
else:
return grammar.is_leftcorner(_next, nexttoken)
########################################################################
## Generic Chart Parser
########################################################################
TD_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
SingleEdgeFundamentalRule()]
BU_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
SingleEdgeFundamentalRule()]
BU_LC_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
SingleEdgeFundamentalRule()]
LC_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredSingleEdgeFundamentalRule()]
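# Each strategy pairs initialization and prediction rules with the single-edge
# fundamental rule: TD_STRATEGY parses top-down, BU_STRATEGY and BU_LC_STRATEGY
# parse bottom-up (the latter with combined predict/complete edges), and
# LC_STRATEGY adds left-corner filtering, which is why LeftCornerChartParser
# below rejects grammars containing empty productions.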
class ChartParser(ParserI):
def __init__(self, grammar, strategy=BU_LC_STRATEGY, trace=0,
trace_chart_width=50, use_agenda=True, chart_class=Chart):
self._grammar = grammar
self._strategy = strategy
self._trace = trace
self._trace_chart_width = trace_chart_width
# If the strategy only consists of axioms (NUM_EDGES==0) and
# inference rules (NUM_EDGES==1), we can use an agenda-based algorithm:
self._use_agenda = use_agenda
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
self._use_agenda = False
def grammar(self):
return self._grammar
def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width):
if not trace: return
print_rule_header = trace > 1
for edge in new_edges:
if print_rule_header:
print('%s:' % rule)
print_rule_header = False
print(chart.pretty_format_edge(edge, edge_width))
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
if trace: print(chart.pretty_format_leaves(trace_edge_width))
if self._use_agenda:
# Use an agenda-based algorithm.
for axiom in self._axioms:
new_edges = list(axiom.apply(chart, grammar))
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
agenda = chart.edges()
# We reverse the initial agenda, since it is a stack
# but chart.edges() functions as a queue.
agenda.reverse()
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = list(rule.apply(chart, grammar, edge))
if trace:
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
agenda += new_edges
else:
# Do not use an agenda-based algorithm.
edges_added = True
while edges_added:
edges_added = False
for rule in self._strategy:
new_edges = list(rule.apply_everywhere(chart, grammar))
edges_added = len(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
# Return the final chart.
return chart
def parse(self, tokens, tree_class=Tree):
chart = self.chart_parse(tokens)
return iter(chart.parses(self._grammar.start(), tree_class=tree_class))
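# Minimal usage sketch (not part of the original module; assumes `grammar` is a
# CFG whose terminals cover the tokens):
#     parser = ChartParser(grammar)          # BU_LC_STRATEGY by default
#     for tree in parser.parse('I saw John'.split()):
#         print(tree)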
class TopDownChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args)
class BottomUpChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
if isinstance(grammar, PCFG):
warnings.warn("BottomUpChartParser only works for CFG, "
"use BottomUpProbabilisticChartParser instead",
category=DeprecationWarning)
ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
class BottomUpLeftCornerChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args)
class LeftCornerChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("LeftCornerParser only works for grammars "
"without empty productions.")
ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args)
########################################################################
## Stepping Chart Parser
########################################################################
class SteppingChartParser(ChartParser):
def __init__(self, grammar, strategy=[], trace=0):
self._chart = None
self._current_chartrule = None
self._restart = False
ChartParser.__init__(self, grammar, strategy, trace)
#////////////////////////////////////////////////////////////
# Initialization
#////////////////////////////////////////////////////////////
def initialize(self, tokens):
self._chart = Chart(list(tokens))
self._restart = True
#////////////////////////////////////////////////////////////
# Stepping
#////////////////////////////////////////////////////////////
def step(self):
if self._chart is None:
raise ValueError('Parser must be initialized first')
while True:
self._restart = False
w = 50 // (self._chart.num_leaves()+1)
for e in self._parse():
if self._trace > 1: print(self._current_chartrule)
if self._trace > 0: print(self._chart.pretty_format_edge(e,w))
yield e
if self._restart: break
else:
yield None # No more edges.
def _parse(self):
chart = self._chart
grammar = self._grammar
edges_added = 1
while edges_added > 0:
edges_added = 0
for rule in self._strategy:
self._current_chartrule = rule
for e in rule.apply_everywhere(chart, grammar):
edges_added += 1
yield e
#////////////////////////////////////////////////////////////
# Accessors
#////////////////////////////////////////////////////////////
def strategy(self):
return self._strategy
def grammar(self):
return self._grammar
def chart(self):
return self._chart
def current_chartrule(self):
return self._current_chartrule
def parses(self, tree_class=Tree):
return self._chart.parses(self._grammar.start(), tree_class)
#////////////////////////////////////////////////////////////
# Parser modification
#////////////////////////////////////////////////////////////
def set_strategy(self, strategy):
if strategy == self._strategy: return
self._strategy = strategy[:] # Make a copy.
self._restart = True
def set_grammar(self, grammar):
if grammar is self._grammar: return
self._grammar = grammar
self._restart = True
def set_chart(self, chart):
if chart is self._chart: return
self._chart = chart
self._restart = True
#////////////////////////////////////////////////////////////
# Standard parser methods
#////////////////////////////////////////////////////////////
def parse(self, tokens, tree_class=Tree):
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# Initialize ourselves.
self.initialize(tokens)
# Step until no more edges are generated.
for e in self.step():
if e is None: break
# Return an iterator of complete parses.
return self.parses(tree_class=tree_class)
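# Interactive sketch (not part of the original module; assumes `grammar` and
# `tokens` as above): initialize the chart, then switch strategies and pull
# edges from step() until it yields None.
#     cp = SteppingChartParser(grammar)
#     cp.initialize(tokens)
#     cp.set_strategy(TD_STRATEGY)
#     for edge in cp.step():
#         if edge is None: break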
########################################################################
## Demo Code
########################################################################
def demo_grammar():
from nltk.grammar import CFG
return CFG.fromstring("""
S -> NP VP
PP -> "with" NP
NP -> NP PP
VP -> VP PP
VP -> Verb NP
VP -> Verb
NP -> Det Noun
NP -> "John"
NP -> "I"
Det -> "the"
Det -> "my"
Det -> "a"
Noun -> "dog"
Noun -> "cookie"
Verb -> "ate"
Verb -> "saw"
Prep -> "with"
Prep -> "under"
""")
def demo(choice=None,
print_times=True, print_grammar=False,
print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
import sys, time
from nltk import nonterminals, Production, CFG
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if print_grammar:
print("* Grammar")
print(grammar)
# Tokenize the sample sentence.
print("* Sentence:")
print(sent)
tokens = sent.split()
print(tokens)
print()
# Ask the user which parser to test,
# if the parser wasn't provided as an argument
if choice is None:
print(' 1: Top-down chart parser')
print(' 2: Bottom-up chart parser')
print(' 3: Bottom-up left-corner chart parser')
print(' 4: Left-corner chart parser with bottom-up filter')
print(' 5: Stepping chart parser (alternating top-down & bottom-up)')
print(' 6: All parsers')
print('\nWhich parser (1-6)? ', end=' ')
choice = sys.stdin.readline().strip()
print()
choice = str(choice)
if choice not in "123456":
print('Bad parser number')
return
times = {}
strategies = {'1': ('Top-down', TD_STRATEGY),
'2': ('Bottom-up', BU_STRATEGY),
'3': ('Bottom-up left-corner', BU_LC_STRATEGY),
'4': ('Filtered left-corner', LC_STRATEGY)}
choices = []
if choice in strategies: choices = [choice]
if choice=='6': choices = "1234"
for strategy in choices:
print("* Strategy: " + strategies[strategy][0])
print()
cp = ChartParser(grammar, strategies[strategy][1], trace=trace)
t = time.time()
chart = cp.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
times[strategies[strategy][0]] = time.time()-t
print("Nr edges in chart:", len(chart.edges()))
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if print_trees:
for tree in parses: print(tree)
else:
print("Nr trees:", len(parses))
print()
if choice in "56":
print("* Strategy: Stepping (top-down vs bottom-up)")
print()
t = time.time()
cp = SteppingChartParser(grammar, trace=trace)
cp.initialize(tokens)
for i in range(5):
print('*** SWITCH TO TOP DOWN')
cp.set_strategy(TD_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
print('*** SWITCH TO BOTTOM UP')
cp.set_strategy(BU_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
times['Stepping'] = time.time()-t
print("Nr edges in chart:", len(cp.chart().edges()))
if numparses:
assert len(list(cp.parses()))==numparses, 'Not all parses found'
if print_trees:
for tree in cp.parses(): print(tree)
else:
print("Nr trees:", len(list(cp.parses())))
print()
if not (print_times and times): return
print("* Parsing times")
print()
maxlen = max(len(key) for key in times)
format = '%' + repr(maxlen) + 's parser: %6.3fsec'
times_items = times.items()
for (parser, t) in sorted(times_items, key=lambda a:a[1]):
print(format % (parser, t))
if __name__ == '__main__':
demo()
| true | true |
1c49e8c673c464665b7013997bb5ecdb23c0b915 | 642 | py | Python | tests/objects/message/__init__.py | mjneff2/Pincer | a11bc3e4bad319fdf927d913c58c933576ec7c99 | ["MIT"] | null | null | null | tests/objects/message/__init__.py | mjneff2/Pincer | a11bc3e4bad319fdf927d913c58c933576ec7c99 | ["MIT"] | null | null | null | tests/objects/message/__init__.py | mjneff2/Pincer | a11bc3e4bad319fdf927d913c58c933576ec7c99 | ["MIT"] | null | null | null | from pincer.objects import Embed
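# Builds a sample embed and prints its dict (serialized) form.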
print(Embed(
title="Pincer - 0.6.4",
description=(
"🚀 An asynchronous python API wrapper meant to replace"
" discord.py\n> Snappy discord api wrapper written "
"with aiohttp & websockets"
)
).add_field(
name="**Github Repository**",
value="> https://github.com/Pincer-org/Pincer"
).set_thumbnail(
url="https://pincer.dev/img/icon.png"
).set_image(
url=(
"https://repository-images.githubusercontent.com"
"/400871418/045ebf39-7c6e-4c3a-b744-0c3122374203"
)
).to_dict()) | 32.1 | 67 | 0.579439 | from pincer.objects import Embed
print(Embed(
title="Pincer - 0.6.4",
description=(
"🚀 An asynchronous python API wrapper meant to replace"
" discord.py\n> Snappy discord api wrapper written "
"with aiohttp & websockets"
)
).add_field(
name="**Github Repository**",
value="> https://github.com/Pincer-org/Pincer"
).set_thumbnail(
url="https://pincer.dev/img/icon.png"
).set_image(
url=(
"https://repository-images.githubusercontent.com"
"/400871418/045ebf39-7c6e-4c3a-b744-0c3122374203"
)
).to_dict()) | true | true |
1c49e8d16ca5be1232c4449dc3a9df00edfe575b | 1,186 | py | Python | setup.py | deone/requestor | 9af13ebc90861d37dc2db4e1b1375aa445655868 | ["MIT"] | null | null | null | setup.py | deone/requestor | 9af13ebc90861d37dc2db4e1b1375aa445655868 | ["MIT"] | null | null | null | setup.py | deone/requestor | 9af13ebc90861d37dc2db4e1b1375aa445655868 | ["MIT"] | null | null | null | # Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='requestor',
version='0.1.1',
description='Use this package to make HTTP post calls to django APIs with csrf support, and return response in json.',
long_description=long_description,
url='https://github.com/deone/requestor',
author='Dayo Osikoya',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='http post requests json response django csrf',
py_modules=["requestor"],
install_requires=['requests'],
)
| 33.885714 | 122 | 0.676223 | from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='requestor',
version='0.1.1',
description='Use this package to make HTTP post calls to django APIs with csrf support, and return response in json.',
long_description=long_description,
url='https://github.com/deone/requestor',
author='Dayo Osikoya',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='http post requests json response django csrf',
py_modules=["requestor"],
install_requires=['requests'],
)
| true | true |
1c49e9bafda8707fe36fdbba4d19f3bf3c46ee9e | 43,586 | py | Python | codalab/apps/web/migrations/0063_CompetitionDump.py | AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public | 64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7 | ["Apache-2.0"] | 333 | 2015-12-29T22:49:40.000Z | 2022-03-27T12:01:57.000Z | codalab/apps/web/migrations/0063_CompetitionDump.py | AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public | 64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7 | ["Apache-2.0"] | 1,572 | 2015-12-28T21:54:00.000Z | 2022-03-31T13:00:32.000Z | codalab/apps/web/migrations/0063_CompetitionDump.py | AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public | 64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7 | ["Apache-2.0"] | 107 | 2016-01-08T03:46:07.000Z | 2022-03-16T08:43:57.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CompetitionDump'
db.create_table(u'web_competitiondump', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('competition', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dumps', to=orm['web.Competition'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='Starting', max_length=64)),
('data_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'web', ['CompetitionDump'])
def backwards(self, orm):
# Deleting model 'CompetitionDump'
db.delete_table(u'web_competitiondump')
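    # Frozen ORM definitions captured by South; used to reconstruct the model
    # state this migration expects rather than executed as live model code.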
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'authenz.cluser': {
'Meta': {'object_name': 'ClUser'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'rabbitmq_password': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'rabbitmq_queue_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'blank': 'True'}),
'rabbitmq_username': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'queues.queue': {
'Meta': {'object_name': 'Queue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'teams.team': {
'Meta': {'unique_together': "(('name', 'competition'),)", 'object_name': 'Team'},
'allow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['authenz.ClUser']", 'null': 'True', 'through': u"orm['teams.TeamMembership']", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamStatus']", 'null': 'True'})
},
u'teams.teammembership': {
'Meta': {'object_name': 'TeamMembership'},
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_invitation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamMembershipStatus']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'teams.teammembershipstatus': {
'Meta': {'object_name': 'TeamMembershipStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'teams.teamstatus': {
'Meta': {'object_name': 'TeamStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.competition': {
'Meta': {'ordering': "['end_date']", 'object_name': 'Competition'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'allow_public_submissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anonymous_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disallow_leaderboard_modifying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_detailed_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_forum': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_medical_image_viewer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_per_submission_metadata': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_teams': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'force_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_migrating': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_migrating_delayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_phase_migration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_modified_by'", 'to': u"orm['authenz.ClUser']"}),
'original_yaml_file': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['queues.Queue']"}),
'require_team_approval': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reward': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'show_datasets_from_yaml': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['teams.Team']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'web.competitiondefbundle': {
'Meta': {'object_name': 'CompetitionDefBundle'},
'config_bundle': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['authenz.ClUser']"}),
's3_config_bundle': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitiondump': {
'Meta': {'object_name': 'CompetitionDump'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dumps'", 'to': u"orm['web.Competition']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Starting'", 'max_length': '64'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'web.competitionparticipant': {
'Meta': {'unique_together': "(('user', 'competition'),)", 'object_name': 'CompetitionParticipant'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participants'", 'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ParticipantStatus']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participation'", 'to': u"orm['authenz.ClUser']"})
},
u'web.competitionphase': {
'Meta': {'ordering': "['phasenumber']", 'object_name': 'CompetitionPhase'},
'auto_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['web.Competition']"}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'phase'", 'blank': 'True', 'to': u"orm['web.Dataset']"}),
'default_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'disable_custom_docker_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'execution_time_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}),
'force_best_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'input_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'input_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'leaderboard_management_mode': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}),
'max_submissions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'max_submissions_per_day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999'}),
'phase_never_ends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phasenumber': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reference_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reference_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reference_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'scoring_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scoring_program_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'scoring_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scoring_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'web.competitionsubmission': {
'Meta': {'unique_together': "(('submission_number', 'phase', 'participant'),)", 'object_name': 'CompetitionSubmission'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'coopetition_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'detailed_results_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dislike_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'exception_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'execution_key': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'file_url_base': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'history_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inputfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionParticipant']"}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionPhase']"}),
'prediction_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'readable_filename': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
's3_file': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}),
'scores_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionSubmissionStatus']"}),
'status_details': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'submission_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['teams.Team']"}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'when_made_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_unmade_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitionsubmissionmetadata': {
'Meta': {'object_name': 'CompetitionSubmissionMetadata'},
'beginning_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_predict': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processes_running_in_temp_dir': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadatas'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.competitionsubmissionstatus': {
'Meta': {'object_name': 'CompetitionSubmissionStatus'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.contentcategory': {
'Meta': {'object_name': 'ContentCategory'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'content_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.ContentCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"})
},
u'web.contentvisibility': {
'Meta': {'object_name': 'ContentVisibility'},
'classname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.dataset': {
'Meta': {'ordering': "['number']", 'object_name': 'Dataset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['authenz.ClUser']"}),
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFile']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'web.defaultcontentitem': {
'Meta': {'object_name': 'DefaultContentItem'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'web.externalfile': {
'Meta': {'object_name': 'ExternalFile'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_address_info': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFileType']"})
},
u'web.externalfilesource': {
'Meta': {'object_name': 'ExternalFileSource'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'web.externalfiletype': {
'Meta': {'object_name': 'ExternalFileType'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.organizerdataset': {
'Meta': {'object_name': 'OrganizerDataSet'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sub_data_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['web.OrganizerDataSet']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '64'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'web.page': {
'Meta': {'ordering': "['category', 'rank']", 'unique_together': "(('label', 'category', 'container'),)", 'object_name': 'Page'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'null': 'True', 'to': u"orm['web.Competition']"}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['web.PageContainer']"}),
'defaults': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.DefaultContentItem']", 'null': 'True', 'blank': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'markup': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'web.pagecontainer': {
'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'PageContainer'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'web.participantstatus': {
'Meta': {'object_name': 'ParticipantStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.phaseleaderboard': {
'Meta': {'object_name': 'PhaseLeaderBoard'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'board'", 'unique': 'True', 'to': u"orm['web.CompetitionPhase']"})
},
u'web.phaseleaderboardentry': {
'Meta': {'unique_together': "(('board', 'result'),)", 'object_name': 'PhaseLeaderBoardEntry'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['web.PhaseLeaderBoard']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leaderboard_entry_result'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.submissioncomputedscore': {
'Meta': {'object_name': 'SubmissionComputedScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'scoredef': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'computed_score'", 'unique': 'True', 'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissioncomputedscorefield': {
'Meta': {'object_name': 'SubmissionComputedScoreField'},
'computed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['web.SubmissionComputedScore']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionresultgroup': {
'Meta': {'ordering': "['ordering']", 'object_name': 'SubmissionResultGroup'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'phases': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.CompetitionPhase']", 'through': u"orm['web.SubmissionResultGroupPhase']", 'symmetrical': 'False'})
},
u'web.submissionresultgroupphase': {
'Meta': {'unique_together': "(('group', 'phase'),)", 'object_name': 'SubmissionResultGroupPhase'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionPhase']"})
},
u'web.submissionscore': {
'Meta': {'unique_together': "(('result', 'scoredef'),)", 'object_name': 'SubmissionScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scores'", 'to': u"orm['web.CompetitionSubmission']"}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '10'})
},
u'web.submissionscoredef': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreDef'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.SubmissionResultGroup']", 'through': u"orm['web.SubmissionScoreDefGroup']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'numeric_format': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'selection_default': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_rank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sorting': ('django.db.models.fields.SlugField', [], {'default': "'asc'", 'max_length': '20'})
},
u'web.submissionscoredefgroup': {
'Meta': {'unique_together': "(('scoredef', 'group'),)", 'object_name': 'SubmissionScoreDefGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionscoreset': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreSet'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.SubmissionScoreSet']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']", 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['web'] | 91.76 | 260 | 0.576079 | from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table(u'web_competitiondump', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('competition', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dumps', to=orm['web.Competition'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='Starting', max_length=64)),
('data_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'web', ['CompetitionDump'])
def backwards(self, orm):
db.delete_table(u'web_competitiondump')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'authenz.cluser': {
'Meta': {'object_name': 'ClUser'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'rabbitmq_password': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'rabbitmq_queue_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'blank': 'True'}),
'rabbitmq_username': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'queues.queue': {
'Meta': {'object_name': 'Queue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'teams.team': {
'Meta': {'unique_together': "(('name', 'competition'),)", 'object_name': 'Team'},
'allow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['authenz.ClUser']", 'null': 'True', 'through': u"orm['teams.TeamMembership']", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamStatus']", 'null': 'True'})
},
u'teams.teammembership': {
'Meta': {'object_name': 'TeamMembership'},
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_invitation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamMembershipStatus']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'teams.teammembershipstatus': {
'Meta': {'object_name': 'TeamMembershipStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'teams.teamstatus': {
'Meta': {'object_name': 'TeamStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.competition': {
'Meta': {'ordering': "['end_date']", 'object_name': 'Competition'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'allow_public_submissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anonymous_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disallow_leaderboard_modifying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_detailed_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_forum': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_medical_image_viewer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_per_submission_metadata': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_teams': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'force_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_migrating': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_migrating_delayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_phase_migration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_modified_by'", 'to': u"orm['authenz.ClUser']"}),
'original_yaml_file': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['queues.Queue']"}),
'require_team_approval': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reward': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'show_datasets_from_yaml': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['teams.Team']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'web.competitiondefbundle': {
'Meta': {'object_name': 'CompetitionDefBundle'},
'config_bundle': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['authenz.ClUser']"}),
's3_config_bundle': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitiondump': {
'Meta': {'object_name': 'CompetitionDump'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dumps'", 'to': u"orm['web.Competition']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Starting'", 'max_length': '64'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'web.competitionparticipant': {
'Meta': {'unique_together': "(('user', 'competition'),)", 'object_name': 'CompetitionParticipant'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participants'", 'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ParticipantStatus']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participation'", 'to': u"orm['authenz.ClUser']"})
},
u'web.competitionphase': {
'Meta': {'ordering': "['phasenumber']", 'object_name': 'CompetitionPhase'},
'auto_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['web.Competition']"}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'phase'", 'blank': 'True', 'to': u"orm['web.Dataset']"}),
'default_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'disable_custom_docker_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'execution_time_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}),
'force_best_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'input_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'input_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'leaderboard_management_mode': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}),
'max_submissions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'max_submissions_per_day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999'}),
'phase_never_ends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phasenumber': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reference_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reference_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reference_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'scoring_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scoring_program_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'scoring_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scoring_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'web.competitionsubmission': {
'Meta': {'unique_together': "(('submission_number', 'phase', 'participant'),)", 'object_name': 'CompetitionSubmission'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'coopetition_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'detailed_results_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dislike_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'exception_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'execution_key': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'file_url_base': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'history_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inputfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionParticipant']"}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionPhase']"}),
'prediction_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'readable_filename': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
's3_file': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}),
'scores_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionSubmissionStatus']"}),
'status_details': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'submission_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['teams.Team']"}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'when_made_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_unmade_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitionsubmissionmetadata': {
'Meta': {'object_name': 'CompetitionSubmissionMetadata'},
'beginning_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_predict': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processes_running_in_temp_dir': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadatas'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.competitionsubmissionstatus': {
'Meta': {'object_name': 'CompetitionSubmissionStatus'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.contentcategory': {
'Meta': {'object_name': 'ContentCategory'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'content_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.ContentCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"})
},
u'web.contentvisibility': {
'Meta': {'object_name': 'ContentVisibility'},
'classname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.dataset': {
'Meta': {'ordering': "['number']", 'object_name': 'Dataset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['authenz.ClUser']"}),
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFile']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'web.defaultcontentitem': {
'Meta': {'object_name': 'DefaultContentItem'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'web.externalfile': {
'Meta': {'object_name': 'ExternalFile'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_address_info': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFileType']"})
},
u'web.externalfilesource': {
'Meta': {'object_name': 'ExternalFileSource'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'web.externalfiletype': {
'Meta': {'object_name': 'ExternalFileType'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.organizerdataset': {
'Meta': {'object_name': 'OrganizerDataSet'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sub_data_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['web.OrganizerDataSet']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '64'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'web.page': {
'Meta': {'ordering': "['category', 'rank']", 'unique_together': "(('label', 'category', 'container'),)", 'object_name': 'Page'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'null': 'True', 'to': u"orm['web.Competition']"}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['web.PageContainer']"}),
'defaults': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.DefaultContentItem']", 'null': 'True', 'blank': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'markup': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'web.pagecontainer': {
'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'PageContainer'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'web.participantstatus': {
'Meta': {'object_name': 'ParticipantStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.phaseleaderboard': {
'Meta': {'object_name': 'PhaseLeaderBoard'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'board'", 'unique': 'True', 'to': u"orm['web.CompetitionPhase']"})
},
u'web.phaseleaderboardentry': {
'Meta': {'unique_together': "(('board', 'result'),)", 'object_name': 'PhaseLeaderBoardEntry'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['web.PhaseLeaderBoard']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leaderboard_entry_result'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.submissioncomputedscore': {
'Meta': {'object_name': 'SubmissionComputedScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'scoredef': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'computed_score'", 'unique': 'True', 'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissioncomputedscorefield': {
'Meta': {'object_name': 'SubmissionComputedScoreField'},
'computed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['web.SubmissionComputedScore']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionresultgroup': {
'Meta': {'ordering': "['ordering']", 'object_name': 'SubmissionResultGroup'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'phases': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.CompetitionPhase']", 'through': u"orm['web.SubmissionResultGroupPhase']", 'symmetrical': 'False'})
},
u'web.submissionresultgroupphase': {
'Meta': {'unique_together': "(('group', 'phase'),)", 'object_name': 'SubmissionResultGroupPhase'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionPhase']"})
},
u'web.submissionscore': {
'Meta': {'unique_together': "(('result', 'scoredef'),)", 'object_name': 'SubmissionScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scores'", 'to': u"orm['web.CompetitionSubmission']"}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '10'})
},
u'web.submissionscoredef': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreDef'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.SubmissionResultGroup']", 'through': u"orm['web.SubmissionScoreDefGroup']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'numeric_format': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'selection_default': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_rank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sorting': ('django.db.models.fields.SlugField', [], {'default': "'asc'", 'max_length': '20'})
},
u'web.submissionscoredefgroup': {
'Meta': {'unique_together': "(('scoredef', 'group'),)", 'object_name': 'SubmissionScoreDefGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionscoreset': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreSet'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.SubmissionScoreSet']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']", 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['web'] | true | true |
1c49ea49846f00cd469bd4230329f98ec6de0734 | 3,872 | py | Python | tensorflow/python/autograph/tests/basic_list_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 3 | 2022-03-09T01:39:56.000Z | 2022-03-30T23:17:58.000Z | tensorflow/python/autograph/tests/basic_list_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 1 | 2020-08-01T05:40:12.000Z | 2020-08-01T05:40:12.000Z | tensorflow/python/autograph/tests/basic_list_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 1 | 2022-03-22T00:45:15.000Z | 2022-03-22T00:45:15.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic list operations."""
import tensorflow as tf
from tensorflow.python import autograph as ag
from tensorflow.python.autograph.tests import reference_test_base
def type_not_annotated(n):
l = []
# TODO(mdan): Here, we ought to infer the dtype and shape when i is staged.
for i in range(n):
l.append(i)
return ag.stack(l, strict=False)
def element_access():
l = []
l.append(1)
l.append(2)
l.append(3)
ag.set_element_type(l, tf.int32)
return 2 * l[1]
def element_update():
l = []
l.append(1)
l.append(2)
l.append(3)
ag.set_element_type(l, tf.int32)
l[1] = 5
return ag.stack(l, strict=False)
def simple_fill(n):
l = []
ag.set_element_type(l, tf.int32)
for i in range(n):
l.append(i)
return ag.stack(l, strict=False)
def nested_fill(m, n):
mat = []
ag.set_element_type(mat, tf.int32)
for _ in range(m):
l = []
ag.set_element_type(l, tf.int32)
for j in range(n):
l.append(j)
mat.append(ag.stack(l, strict=False))
return ag.stack(mat, strict=False)
def read_write_loop(n):
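  # Each loop step appends the sum of the previous two entries, then negates the older of those two in place.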
l = []
l.append(1)
l.append(1)
ag.set_element_type(l, tf.int32)
for i in range(2, n):
l.append(l[i-1] + l[i-2])
l[i-2] = -l[i-2]
return ag.stack(l, strict=False)
def simple_empty(n):
l = []
l.append(1)
l.append(2)
l.append(3)
l.append(4)
ag.set_element_type(l, tf.int32, ())
s = 0
for _ in range(n):
s += l.pop()
return ag.stack(l, strict=False), s
def mutation(t, n):
for i in range(n):
t[i] = i
return t
class ReferenceTest(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.autograph_opts = tf.autograph.experimental.Feature.LISTS
def test_tensor_mutation(self):
self.assertConvertedMatchesNative(mutation, [0] * 10, 10)
def test_basic(self):
self.all_inputs_tensors = True
self.assertFunctionMatchesEager(element_access)
self.assertFunctionMatchesEager(element_update)
# TODO(mdan): This should raise a compilation, not runtime, error.
with self.assertRaisesRegex(
ValueError,
'cannot stack a list without knowing its element type; '
'use set_element_type to annotate it'):
self.function(type_not_annotated)(3)
self.assertFunctionMatchesEager(simple_fill, 5)
self.assertFunctionMatchesEager(nested_fill, 5, 3)
self.assertFunctionMatchesEager(read_write_loop, 4)
self.assertFunctionMatchesEager(simple_empty, 0)
self.assertFunctionMatchesEager(simple_empty, 2)
self.assertFunctionMatchesEager(simple_empty, 4)
# TODO(mdan): Allow explicitly setting the element shape to mitigate these.
# TODO(mdan): This should raise a friendlier runtime error.
# The error should spell out that empty lists cannot be stacked.
# Alternatively, we can also insert conditionals that construct a zero-sized
# Tensor of the appropriate type and shape, but we first want to make sure
# that doesn't degrade performance.
with self.assertRaises(ValueError):
self.function(simple_fill)(0)
with self.assertRaises(ValueError):
self.function(nested_fill)(0, 3)
if __name__ == '__main__':
tf.test.main()
| 27.267606 | 80 | 0.685692 |
import tensorflow as tf
from tensorflow.python import autograph as ag
from tensorflow.python.autograph.tests import reference_test_base
def type_not_annotated(n):
l = []
for i in range(n):
l.append(i)
return ag.stack(l, strict=False)
def element_access():
l = []
l.append(1)
l.append(2)
l.append(3)
ag.set_element_type(l, tf.int32)
return 2 * l[1]
def element_update():
l = []
l.append(1)
l.append(2)
l.append(3)
ag.set_element_type(l, tf.int32)
l[1] = 5
return ag.stack(l, strict=False)
def simple_fill(n):
l = []
ag.set_element_type(l, tf.int32)
for i in range(n):
l.append(i)
return ag.stack(l, strict=False)
def nested_fill(m, n):
mat = []
ag.set_element_type(mat, tf.int32)
for _ in range(m):
l = []
ag.set_element_type(l, tf.int32)
for j in range(n):
l.append(j)
mat.append(ag.stack(l, strict=False))
return ag.stack(mat, strict=False)
def read_write_loop(n):
l = []
l.append(1)
l.append(1)
ag.set_element_type(l, tf.int32)
for i in range(2, n):
l.append(l[i-1] + l[i-2])
l[i-2] = -l[i-2]
return ag.stack(l, strict=False)
def simple_empty(n):
l = []
l.append(1)
l.append(2)
l.append(3)
l.append(4)
ag.set_element_type(l, tf.int32, ())
s = 0
for _ in range(n):
s += l.pop()
return ag.stack(l, strict=False), s
def mutation(t, n):
for i in range(n):
t[i] = i
return t
class ReferenceTest(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.autograph_opts = tf.autograph.experimental.Feature.LISTS
def test_tensor_mutation(self):
self.assertConvertedMatchesNative(mutation, [0] * 10, 10)
def test_basic(self):
self.all_inputs_tensors = True
self.assertFunctionMatchesEager(element_access)
self.assertFunctionMatchesEager(element_update)
with self.assertRaisesRegex(
ValueError,
'cannot stack a list without knowing its element type; '
'use set_element_type to annotate it'):
self.function(type_not_annotated)(3)
self.assertFunctionMatchesEager(simple_fill, 5)
self.assertFunctionMatchesEager(nested_fill, 5, 3)
self.assertFunctionMatchesEager(read_write_loop, 4)
self.assertFunctionMatchesEager(simple_empty, 0)
self.assertFunctionMatchesEager(simple_empty, 2)
self.assertFunctionMatchesEager(simple_empty, 4)
with self.assertRaises(ValueError):
self.function(simple_fill)(0)
with self.assertRaises(ValueError):
self.function(nested_fill)(0, 3)
if __name__ == '__main__':
tf.test.main()
| true | true |
1c49ea6e4d435b59d0f0cb4c10b9268eb44ceb65 | 2,196 | py | Python | python_example/python_example.py | markbentzjr/TroyBot | 3be3afa6ae05b889c505f7ab2cc140cc368d1c05 | [
"MIT"
] | 1 | 2018-11-24T12:50:51.000Z | 2018-11-24T12:50:51.000Z | python_example/python_example.py | markbentzjr/TroyBot | 3be3afa6ae05b889c505f7ab2cc140cc368d1c05 | [
"MIT"
] | null | null | null | python_example/python_example.py | markbentzjr/TroyBot | 3be3afa6ae05b889c505f7ab2cc140cc368d1c05 | [
"MIT"
] | null | null | null | import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
class PythonExample(BaseAgent):
def initialize_agent(self):
        # This runs once before the bot starts up.
self.controller_state = SimpleControllerState()
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
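        # Simple ball chasing: compare the car's facing direction with the direction to the ball and steer toward it.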
ball_location = Vector2(packet.game_ball.physics.location.x, packet.game_ball.physics.location.y)
my_car = packet.game_cars[self.index]
car_location = Vector2(my_car.physics.location.x, my_car.physics.location.y)
car_direction = get_car_facing_vector(my_car)
car_to_ball = ball_location - car_location
steer_correction_radians = car_direction.correction_to(car_to_ball)
if steer_correction_radians > 0:
# Positive radians in the unit circle is a turn to the left.
turn = -1.0 # Negative value for a turn to the left.
else:
turn = 1.0
self.controller_state.throttle = 1.0
self.controller_state.steer = turn
return self.controller_state
class Vector2:
def __init__(self, x=0, y=0):
self.x = float(x)
self.y = float(y)
def __add__(self, val):
return Vector2(self.x + val.x, self.y + val.y)
def __sub__(self, val):
return Vector2(self.x - val.x, self.y - val.y)
def correction_to(self, ideal):
        # The in-game axes are left-handed, so use -x
current_in_radians = math.atan2(self.y, -self.x)
ideal_in_radians = math.atan2(ideal.y, -ideal.x)
correction = ideal_in_radians - current_in_radians
# Make sure we go the 'short way'
if abs(correction) > math.pi:
if correction < 0:
correction += 2 * math.pi
else:
correction -= 2 * math.pi
return correction
def get_car_facing_vector(car):
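    # Horizontal (ground-plane) direction the car's nose points in, derived from its pitch and yaw.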
pitch = float(car.physics.rotation.pitch)
yaw = float(car.physics.rotation.yaw)
facing_x = math.cos(pitch) * math.cos(yaw)
facing_y = math.cos(pitch) * math.sin(yaw)
return Vector2(facing_x, facing_y)
| 30.929577 | 105 | 0.656193 | import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
class PythonExample(BaseAgent):
def initialize_agent(self):
self.controller_state = SimpleControllerState()
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
ball_location = Vector2(packet.game_ball.physics.location.x, packet.game_ball.physics.location.y)
my_car = packet.game_cars[self.index]
car_location = Vector2(my_car.physics.location.x, my_car.physics.location.y)
car_direction = get_car_facing_vector(my_car)
car_to_ball = ball_location - car_location
steer_correction_radians = car_direction.correction_to(car_to_ball)
if steer_correction_radians > 0:
            turn = -1.0
        else:
turn = 1.0
self.controller_state.throttle = 1.0
self.controller_state.steer = turn
return self.controller_state
class Vector2:
def __init__(self, x=0, y=0):
self.x = float(x)
self.y = float(y)
def __add__(self, val):
return Vector2(self.x + val.x, self.y + val.y)
def __sub__(self, val):
return Vector2(self.x - val.x, self.y - val.y)
def correction_to(self, ideal):
current_in_radians = math.atan2(self.y, -self.x)
ideal_in_radians = math.atan2(ideal.y, -ideal.x)
correction = ideal_in_radians - current_in_radians
if abs(correction) > math.pi:
if correction < 0:
correction += 2 * math.pi
else:
correction -= 2 * math.pi
return correction
def get_car_facing_vector(car):
pitch = float(car.physics.rotation.pitch)
yaw = float(car.physics.rotation.yaw)
facing_x = math.cos(pitch) * math.cos(yaw)
facing_y = math.cos(pitch) * math.sin(yaw)
return Vector2(facing_x, facing_y)
| true | true |
1c49eae527f808a327985c9ccfa493755d812372 | 2,038 | py | Python | util/log.py | brunomateus/open_source_android_apps | 143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa | [
"MIT"
] | 2 | 2019-11-18T18:01:27.000Z | 2021-05-13T18:16:17.000Z | util/log.py | brunomateus/open_source_android_apps | 143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa | [
"MIT"
] | null | null | null | util/log.py | brunomateus/open_source_android_apps | 143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa | [
"MIT"
] | 3 | 2019-07-18T19:33:04.000Z | 2021-01-13T21:13:29.000Z | """Maintain a global logger instance."""
import logging
from typing import IO, Text
import github3
import urllib3
import neo4j
LOG_LEVEL = logging.WARNING
LOG_FORMAT = '%(asctime)s | [%(levelname)s] %(name)s: %(message)s'
LEVELS = [
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL]
def compute_level(verbose: int, quiet: int) -> int:
"""Compute a log level based on input.
Log level is based on LOG_LEVEL.
:param int verbose:
Number of levels to increase log level.
:param int quiet:
Number of levels to decrease log level.
:returns int:
        New log level. One of NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
"""
if verbose < 0 or quiet < 0:
raise ValueError('Input must not be less than 0')
default_index = LEVELS.index(LOG_LEVEL)
index = min(len(LEVELS) - 1, max(0, default_index + quiet - verbose))
return LEVELS[index]
def lower_level_for_libraries(min_level: int):
"""Decrease log level for libraries."""
max_level = max(min_level, logging.WARNING)
for package in [github3, urllib3, neo4j]:
logger = logging.getLogger(package.__package__)
logger.setLevel(max_level)
def configure_logger(name: Text, stream: IO[str], verbose: int, quiet: int):
"""Create handler for logging to an IO stream.
:param Text name:
Name of logger, e.g. __package__.
:param IO[str] stream:
Stream to log to, e.g. sys.stderr.
:param int verbose:
Number of levels to increase log level.
:param int quiet:
Number of levels to decrease log level.
"""
log_level = compute_level(verbose, quiet)
handler = logging.StreamHandler(stream)
handler.setFormatter(logging.Formatter(LOG_FORMAT))
handler.setLevel(log_level)
lower_level_for_libraries(log_level)
logger = logging.getLogger(name)
logger.setLevel(handler.level)
logger.addHandler(handler)
logger.info('Log to %s. Level: %d', stream.name, log_level)
| 30.41791 | 79 | 0.683023 | import logging
from typing import IO, Text
import github3
import urllib3
import neo4j
LOG_LEVEL = logging.WARNING
LOG_FORMAT = '%(asctime)s | [%(levelname)s] %(name)s: %(message)s'
LEVELS = [
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL]
def compute_level(verbose: int, quiet: int) -> int:
if verbose < 0 or quiet < 0:
raise ValueError('Input must not be less than 0')
default_index = LEVELS.index(LOG_LEVEL)
index = min(len(LEVELS) - 1, max(0, default_index + quiet - verbose))
return LEVELS[index]
def lower_level_for_libraries(min_level: int):
max_level = max(min_level, logging.WARNING)
for package in [github3, urllib3, neo4j]:
logger = logging.getLogger(package.__package__)
logger.setLevel(max_level)
def configure_logger(name: Text, stream: IO[str], verbose: int, quiet: int):
log_level = compute_level(verbose, quiet)
handler = logging.StreamHandler(stream)
handler.setFormatter(logging.Formatter(LOG_FORMAT))
handler.setLevel(log_level)
lower_level_for_libraries(log_level)
logger = logging.getLogger(name)
logger.setLevel(handler.level)
logger.addHandler(handler)
logger.info('Log to %s. Level: %d', stream.name, log_level)
| true | true |
1c49eb59478315cee1ec4654e6fbc849582a0094 | 1,528 | py | Python | caldavclientlibrary/admin/xmlaccounts/manage.py | LaudateCorpus1/ccs-caldavclientlibrary | 5b1db7f3b49f03ba715f7286f71ddb9f54ddddac | [
"Apache-2.0"
] | 49 | 2016-08-22T17:34:34.000Z | 2021-11-08T09:47:45.000Z | caldavclientlibrary/admin/xmlaccounts/manage.py | DalavanCloud/ccs-caldavclientlibrary | ce8d554b8a0bcb13468f2dc87eef77da2302d6b3 | [
"Apache-2.0"
] | null | null | null | caldavclientlibrary/admin/xmlaccounts/manage.py | DalavanCloud/ccs-caldavclientlibrary | ce8d554b8a0bcb13468f2dc87eef77da2302d6b3 | [
"Apache-2.0"
] | 18 | 2017-01-21T22:28:04.000Z | 2022-03-26T11:57:30.000Z | ##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# import caldavclientlibrary.admin.xmlaccounts.commands
from caldavclientlibrary.admin.xmlaccounts.commands import registered
import sys
def usage():
"""
Print out the command line usage.
"""
cmds = registered.keys()
cmds.sort()
print """USAGE: manage CMD [OPTIONS]
CMD: one of:
%s
OPTIONS: specific to each command, use --help with the
command to see what options are supported.
""" % ("\n".join(["\t%s" % (cmd,) for cmd in cmds]),)
def runit():
"""
Run the command based on command line arguments.
"""
# Dispatch a command based on the first argument
if len(sys.argv) == 1:
usage()
sys.exit(0)
if sys.argv[1] in registered:
sys.exit(registered[sys.argv[1]]().execute(sys.argv[2:]))
else:
print "No command called '%s' is available." % (sys.argv[1],)
usage()
sys.exit(0)
if __name__ == '__main__':
runit()
| 26.344828 | 74 | 0.674084 |
from caldavclientlibrary.admin.xmlaccounts.commands import registered
import sys
def usage():
"""
Print out the command line usage.
"""
cmds = registered.keys()
cmds.sort()
print """USAGE: manage CMD [OPTIONS]
CMD: one of:
%s
OPTIONS: specific to each command, use --help with the
command to see what options are supported.
""" % ("\n".join(["\t%s" % (cmd,) for cmd in cmds]),)
def runit():
"""
Run the command based on command line arguments.
"""
if len(sys.argv) == 1:
usage()
sys.exit(0)
if sys.argv[1] in registered:
sys.exit(registered[sys.argv[1]]().execute(sys.argv[2:]))
else:
print "No command called '%s' is available." % (sys.argv[1],)
usage()
sys.exit(0)
if __name__ == '__main__':
runit()
| false | true |
1c49eb7bdb4418c37bc8952b6d14fa22f15819b6 | 1,033 | py | Python | authen/views.py | ozcanyarimdunya/django_authen | fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b | [
"MIT"
] | null | null | null | authen/views.py | ozcanyarimdunya/django_authen | fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b | [
"MIT"
] | 2 | 2020-02-11T23:05:32.000Z | 2020-06-05T18:43:16.000Z | authen/views.py | ozcanyarimdunya/django_authen | fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LoginView, LogoutView
from django.conf import settings
class Login(LoginView):
template_name = "authen/login.html"
def get_success_url(self):
return "/"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")})
ctx.update({"login_title": settings.AUTHEN.get("LOGIN_TITLE", "")})
return ctx
class Logout(LoginRequiredMixin, LogoutView):
template_name = "authen/logout.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")})
ctx.update({"logout_title": settings.AUTHEN.get("LOGOUT_TITLE", "")})
ctx.update({"logout_message": settings.AUTHEN.get("LOGOUT_MESSAGE", "")})
return ctx
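# Ready-made view callables, e.g. for wiring into a URLconf.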
login_view = Login.as_view()
logout_view = Logout.as_view()
| 32.28125 | 81 | 0.684414 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LoginView, LogoutView
from django.conf import settings
class Login(LoginView):
template_name = "authen/login.html"
def get_success_url(self):
return "/"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")})
ctx.update({"login_title": settings.AUTHEN.get("LOGIN_TITLE", "")})
return ctx
class Logout(LoginRequiredMixin, LogoutView):
template_name = "authen/logout.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")})
ctx.update({"logout_title": settings.AUTHEN.get("LOGOUT_TITLE", "")})
ctx.update({"logout_message": settings.AUTHEN.get("LOGOUT_MESSAGE", "")})
return ctx
login_view = Login.as_view()
logout_view = Logout.as_view()
| true | true |
1c49ecb6fa75797648420f768e466984724c7b7a | 782 | py | Python | scripts/beam_pairs.py | RolT/ZRTools | 47aa156b660224fd123582c832bb5e5525c262d8 | [
"BSD-3-Clause"
] | 25 | 2015-08-06T20:15:30.000Z | 2021-08-30T15:12:42.000Z | scripts/beam_pairs.py | RolT/ZRTools | 47aa156b660224fd123582c832bb5e5525c262d8 | [
"BSD-3-Clause"
] | 2 | 2017-07-21T11:06:35.000Z | 2020-02-27T13:20:34.000Z | scripts/beam_pairs.py | RolT/ZRTools | 47aa156b660224fd123582c832bb5e5525c262d8 | [
"BSD-3-Clause"
] | 16 | 2015-08-06T21:16:55.000Z | 2020-07-09T08:05:50.000Z | #!/usr/bin/env python
#
# Copyright 2011-2012 Johns Hopkins University (Author: Aren Jansen)
#
from __future__ import division
import sys
import os
import re
import string
import random
beamwidth = int(sys.argv[1])
baselist = []
for line in sys.stdin:
base = line.strip()
baselist.append(base)
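# With beamwidth 0, emit every unordered pair of basenames (self-pairs included);
# otherwise pair each basename with itself plus beamwidth-1 randomly sampled others.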
if beamwidth == 0:
random.shuffle(baselist)
for n in range(len(baselist)):
for m in range(n,len(baselist)):
sys.stdout.write(baselist[n]+" "+baselist[m]+"\n")
else:
for n in range(len(baselist)):
sys.stdout.write(baselist[n]+" "+baselist[n]+"\n")
if beamwidth > 1:
samp = random.sample(baselist,beamwidth-1)
for m in range(len(samp)):
sys.stdout.write(baselist[n]+" "+samp[m]+"\n")
| 21.135135 | 69 | 0.621483 |
from __future__ import division
import sys
import os
import re
import string
import random
beamwidth = int(sys.argv[1])
baselist = []
for line in sys.stdin:
base = line.strip()
baselist.append(base)
if beamwidth == 0:
random.shuffle(baselist)
for n in range(len(baselist)):
for m in range(n,len(baselist)):
sys.stdout.write(baselist[n]+" "+baselist[m]+"\n")
else:
for n in range(len(baselist)):
sys.stdout.write(baselist[n]+" "+baselist[n]+"\n")
if beamwidth > 1:
samp = random.sample(baselist,beamwidth-1)
for m in range(len(samp)):
sys.stdout.write(baselist[n]+" "+samp[m]+"\n")
| true | true |
1c49ed056770080f895f299b51a9ce278a83c276 | 309 | py | Python | oommfc/tests/test_stt.py | gamdow/oommfc | de33ae2a8348ca78d9e16fe18bc562393703c215 | [
"BSD-3-Clause"
] | null | null | null | oommfc/tests/test_stt.py | gamdow/oommfc | de33ae2a8348ca78d9e16fe18bc562393703c215 | [
"BSD-3-Clause"
] | null | null | null | oommfc/tests/test_stt.py | gamdow/oommfc | de33ae2a8348ca78d9e16fe18bc562393703c215 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import oommfc as oc
import micromagneticmodel.tests as mmt
class TestSTT(mmt.TestSTT):
def test_script(self):
for arg in self.valid_args:
u, beta = arg
stt = oc.STT(u, beta)
with pytest.raises(NotImplementedError):
stt._script()
| 23.769231 | 52 | 0.614887 | import pytest
import oommfc as oc
import micromagneticmodel.tests as mmt
class TestSTT(mmt.TestSTT):
def test_script(self):
for arg in self.valid_args:
u, beta = arg
stt = oc.STT(u, beta)
with pytest.raises(NotImplementedError):
stt._script()
| true | true |
1c49ede342e3b5381a8d05e606d45887e3cb7caf | 569 | py | Python | src/Lexer/token_types.py | Sword-And-Rose/Simple-Interpreter | 471b962e385ade5b18e1b1b785cd0d7529011144 | [
"MIT"
] | 1 | 2019-07-19T16:27:31.000Z | 2019-07-19T16:27:31.000Z | src/Lexer/token_types.py | HorizonFTT/Simple-Interpreter | 471b962e385ade5b18e1b1b785cd0d7529011144 | [
"MIT"
] | null | null | null | src/Lexer/token_types.py | HorizonFTT/Simple-Interpreter | 471b962e385ade5b18e1b1b785cd0d7529011144 | [
"MIT"
] | null | null | null | INTEGER = 'INTEGER'
REAL = 'REAL'
INTEGER_CONST = 'INTEGER_CONST'
REAL_CONST = 'REAL_CONST'
STRING = 'STRING'
STRING_CONST = 'STRING_CONST'
PLUS = '+'
MINUS = '-'
MUL = '*'
INTEGER_DIV = 'DIV'
FLOAT_DIV = '/'
LESS_THAN = '<'
GREATER_THAN = '>'
EQUAL = '='
LPAREN = '('
RPAREN = ')'
ID = 'ID'
ASSIGN = ':='
BEGIN = 'BEGIN'
END = 'END'
SEMI = ';'
DOT = '.'
PROGRAM = 'PROGRAM'
VAR = 'VAR'
COLON = ':'
COMMA = ','
PROCEDURE = 'PROCEDURE'
FUNCTION = 'FUNCTION'
CALL = 'CALL'
IF = 'IF'
THEN = 'THEN'
ELSE = 'ELSE'
WHILE = 'WHILE'
DO = 'DO'
FOR = 'FOR'
TO = 'TO'
EOF = 'EOF'
| 14.973684 | 31 | 0.58348 | INTEGER = 'INTEGER'
REAL = 'REAL'
INTEGER_CONST = 'INTEGER_CONST'
REAL_CONST = 'REAL_CONST'
STRING = 'STRING'
STRING_CONST = 'STRING_CONST'
PLUS = '+'
MINUS = '-'
MUL = '*'
INTEGER_DIV = 'DIV'
FLOAT_DIV = '/'
LESS_THAN = '<'
GREATER_THAN = '>'
EQUAL = '='
LPAREN = '('
RPAREN = ')'
ID = 'ID'
ASSIGN = ':='
BEGIN = 'BEGIN'
END = 'END'
SEMI = ';'
DOT = '.'
PROGRAM = 'PROGRAM'
VAR = 'VAR'
COLON = ':'
COMMA = ','
PROCEDURE = 'PROCEDURE'
FUNCTION = 'FUNCTION'
CALL = 'CALL'
IF = 'IF'
THEN = 'THEN'
ELSE = 'ELSE'
WHILE = 'WHILE'
DO = 'DO'
FOR = 'FOR'
TO = 'TO'
EOF = 'EOF'
| true | true |
1c49eea08a2dfc05dfb02ffb03ad6b610d781514 | 248 | py | Python | idm/commands/bind_chat.py | Ruslan21473/IDM2 | 27adc319e753173e63b1d790caec993b920f2823 | [
"MIT"
] | null | null | null | idm/commands/bind_chat.py | Ruslan21473/IDM2 | 27adc319e753173e63b1d790caec993b920f2823 | [
"MIT"
] | null | null | null | idm/commands/bind_chat.py | Ruslan21473/IDM2 | 27adc319e753173e63b1d790caec993b920f2823 | [
"MIT"
] | null | null | null | from ..objects import dp, Event
from ..utils import new_message
@dp.event_handle(dp.Methods.BIND_CHAT)
def bind_chat(event: Event) -> str:
new_message(event.api, event.chat.peer_id,
message=f"✅ Беседа распознана.")
return "ok" | 31 | 47 | 0.701613 | from ..objects import dp, Event
from ..utils import new_message
@dp.event_handle(dp.Methods.BIND_CHAT)
def bind_chat(event: Event) -> str:
new_message(event.api, event.chat.peer_id,
message=f"✅ Беседа распознана.")
return "ok" | true | true |
1c49efc05a3a126007cf12dd6346fb8bbdb8cd2f | 898 | py | Python | simanalysis.py | EndyLab/spaceballs | 331ce388674a4b01b56b36dfb3dda26729b107e6 | [
"MIT"
] | 1 | 2017-10-19T07:41:26.000Z | 2017-10-19T07:41:26.000Z | simanalysis.py | EndyLab/spaceballs | 331ce388674a4b01b56b36dfb3dda26729b107e6 | [
"MIT"
] | 1 | 2017-10-19T07:42:12.000Z | 2017-10-19T07:42:12.000Z | simanalysis.py | EndyLab/spaceballs | 331ce388674a4b01b56b36dfb3dda26729b107e6 | [
"MIT"
] | null | null | null | """
Created by Akshay Maheshwari
09/05/2017
Produces analysis figures from experiment data
"""
from simanalysis_methods import *
import matplotlib.pyplot as plt
import time;
start_time=time.time()
expt_name = "171018_2219"
outputlist = loadOutputList(expt_name,'molpos')
histlistpklpath = combinePkls(expt_name,outputlist,covertime=True)
#histlistpklpath = saveHist(outputlist, expt_name,bins=10,diameter=0.1,molposTS=1e-7)
fig = plotHist(histlistpklpath,expt_name,diameter=0.1, graphs="all", logscale=False,step=1,start=1.25,simtime=1,x_label="R_crowder (nm)")
fig.suptitle("Effects of crowding molecule size on covertime, and dispersion of a single tracked molecule. \n[1s. sim] -- R_tracked=7.25nm -- R_crowder=[1.25nm,2.25nm,...9.25nm] -- $\phi$=0.25 -- time step=1e-7s.")
plt.savefig("data/"+expt_name+"/"+expt_name+"_analysis1.png")
print("--- %s seconds ---" % (time.time() - start_time))
| 44.9 | 214 | 0.752784 | from simanalysis_methods import *
import matplotlib.pyplot as plt
import time;
start_time=time.time()
expt_name = "171018_2219"
outputlist = loadOutputList(expt_name,'molpos')
histlistpklpath = combinePkls(expt_name,outputlist,covertime=True)
fig = plotHist(histlistpklpath,expt_name,diameter=0.1, graphs="all", logscale=False,step=1,start=1.25,simtime=1,x_label="R_crowder (nm)")
fig.suptitle("Effects of crowding molecule size on covertime, and dispersion of a single tracked molecule. \n[1s. sim] -- R_tracked=7.25nm -- R_crowder=[1.25nm,2.25nm,...9.25nm] -- $\phi$=0.25 -- time step=1e-7s.")
plt.savefig("data/"+expt_name+"/"+expt_name+"_analysis1.png")
print("--- %s seconds ---" % (time.time() - start_time))
| true | true |
1c49f0117ce0749903b3f36e283ae1b91cd3b22f | 825 | py | Python | GameMenu.py | KRHS-GameProgramming-2015/King-of-the-Pile | 1368c97ba8124e27f74c6f8aae8e1f8362126934 | [
"BSD-2-Clause"
] | 1 | 2015-12-15T17:39:08.000Z | 2015-12-15T17:39:08.000Z | GameMenu.py | KRHS-GameProgramming-2015/King-of-the-Pile | 1368c97ba8124e27f74c6f8aae8e1f8362126934 | [
"BSD-2-Clause"
] | 4 | 2015-12-21T17:06:51.000Z | 2016-02-10T16:50:57.000Z | GameMenu.py | KRHS-GameProgramming-2015/King-of-the-Pile | 1368c97ba8124e27f74c6f8aae8e1f8362126934 | [
"BSD-2-Clause"
] | null | null | null | import sys, pygame, math, random
from Button import *
class Menu():
def __init__(self, images):
self.images = []
for image in images:
#print image
self.images += [pygame.image.load(image)]
self.image = self.images[0]
self.rect = self.image.get_rect()
self.originalImage = self.image
self.width, self.height = self.image.get_size()
    def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
sys.exit()
elif event.type == pygame.KEYUP:
pass
| 21.153846 | 56 | 0.466667 | import sys, pygame, math, random
from Button import *
class Menu():
def __init__(self, images):
self.images = []
for image in images:
self.images += [pygame.image.load(image)]
self.image = self.images[0]
self.rect = self.image.get_rect()
self.originalImage = self.image
self.width, self.height = self.image.get_size()
    def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
sys.exit()
elif event.type == pygame.KEYUP:
pass
| true | true |
1c49f01f22cbc23cfecb70fb36d3a72ff0991e5f | 8,685 | py | Python | python/paddle_serving_app/local_predict.py | hysunflower/Serving | 50d0c2900f3385b049f76b91e38cc69d8e8a102d | [
"Apache-2.0"
] | null | null | null | python/paddle_serving_app/local_predict.py | hysunflower/Serving | 50d0c2900f3385b049f76b91e38cc69d8e8a102d | [
"Apache-2.0"
] | null | null | null | python/paddle_serving_app/local_predict.py | hysunflower/Serving | 50d0c2900f3385b049f76b91e38cc69d8e8a102d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import google.protobuf.text_format
import numpy as np
import argparse
import paddle.fluid as fluid
import paddle.inference as inference
from .proto import general_model_config_pb2 as m_config
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor
import logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
class LocalPredictor(object):
"""
    Prediction in the current, local process (an in-process call). Compared
    with RPC/HTTP serving, LocalPredictor has better performance because
    there is no network transfer or request packing overhead.
"""
def __init__(self):
self.feed_names_ = []
self.fetch_names_ = []
self.feed_types_ = {}
self.fetch_types_ = {}
self.feed_shapes_ = {}
self.feed_names_to_idx_ = {}
self.fetch_names_to_idx_ = {}
self.fetch_names_to_type_ = {}
def load_model_config(self,
model_path,
use_gpu=False,
gpu_id=0,
use_profile=False,
thread_num=1,
mem_optim=True,
ir_optim=False,
use_trt=False,
use_lite=False,
use_xpu=False,
use_feed_fetch_ops=False):
"""
Load model config and set the engine config for the paddle predictor
Args:
model_path: model config path.
use_gpu: calculating with gpu, False default.
gpu_id: gpu id, 0 default.
use_profile: use predictor profiles, False default.
thread_num: thread nums, default 1.
mem_optim: memory optimization, True default.
            ir_optim: enable computation graph (IR) optimization, False default.
use_trt: use nvidia TensorRT optimization, False default
            use_lite: use Paddle-Lite Engine, False default
use_xpu: run predict on Baidu Kunlun, False default
use_feed_fetch_ops: use feed/fetch ops, False default.
"""
client_config = "{}/serving_server_conf.prototxt".format(model_path)
model_conf = m_config.GeneralModelConfig()
f = open(client_config, 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
config = AnalysisConfig(model_path)
logger.info("load_model_config params: model_path:{}, use_gpu:{},\
gpu_id:{}, use_profile:{}, thread_num:{}, mem_optim:{}, ir_optim:{},\
use_trt:{}, use_lite:{}, use_xpu: {}, use_feed_fetch_ops:{}".format(
model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim,
ir_optim, use_trt, use_lite, use_xpu, use_feed_fetch_ops))
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
self.feed_names_to_idx_ = {}
self.fetch_names_to_idx_ = {}
for i, var in enumerate(model_conf.feed_var):
self.feed_names_to_idx_[var.alias_name] = i
self.feed_types_[var.alias_name] = var.feed_type
self.feed_shapes_[var.alias_name] = var.shape
for i, var in enumerate(model_conf.fetch_var):
self.fetch_names_to_idx_[var.alias_name] = i
self.fetch_names_to_type_[var.alias_name] = var.fetch_type
if use_profile:
config.enable_profile()
if mem_optim:
config.enable_memory_optim()
config.switch_ir_optim(ir_optim)
config.set_cpu_math_library_num_threads(thread_num)
config.switch_use_feed_fetch_ops(use_feed_fetch_ops)
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
if not use_gpu:
config.disable_gpu()
else:
config.enable_use_gpu(100, gpu_id)
if use_trt:
config.enable_tensorrt_engine(
workspace_size=1 << 20,
max_batch_size=32,
min_subgraph_size=3,
use_static=False,
use_calib_mode=False)
if use_lite:
config.enable_lite_engine(
precision_mode=inference.PrecisionType.Float32,
zero_copy=True,
passes_filter=[],
ops_filter=[])
if use_xpu:
            # 8 MB L3 cache
config.enable_xpu(8 * 1024 * 1024)
self.predictor = create_paddle_predictor(config)
def predict(self, feed=None, fetch=None, batch=False, log_id=0):
"""
Predict locally
Args:
feed: feed var
fetch: fetch var
            batch: whether the feed data is already batched, False default.
                If batch is False, a new leading dimension is added to the
                shape (np.newaxis).
log_id: for logging
Returns:
fetch_map: dict
"""
if feed is None or fetch is None:
raise ValueError("You should specify feed and fetch for prediction")
fetch_list = []
if isinstance(fetch, str):
fetch_list = [fetch]
elif isinstance(fetch, list):
fetch_list = fetch
else:
raise ValueError("Fetch only accepts string and list of string")
feed_batch = []
if isinstance(feed, dict):
feed_batch.append(feed)
elif isinstance(feed, list):
feed_batch = feed
else:
raise ValueError("Feed only accepts dict and list of dict")
int_slot_batch = []
float_slot_batch = []
int_feed_names = []
float_feed_names = []
int_shape = []
float_shape = []
fetch_names = []
counter = 0
batch_size = len(feed_batch)
for key in fetch_list:
if key in self.fetch_names_:
fetch_names.append(key)
if len(fetch_names) == 0:
raise ValueError(
"Fetch names should not be empty or out of saved fetch list.")
return {}
input_names = self.predictor.get_input_names()
for name in input_names:
if isinstance(feed[name], list):
feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[
name])
if self.feed_types_[name] == 0:
feed[name] = feed[name].astype("int64")
elif self.feed_types_[name] == 1:
feed[name] = feed[name].astype("float32")
elif self.feed_types_[name] == 2:
feed[name] = feed[name].astype("int32")
else:
raise ValueError("local predictor receives wrong data type")
input_tensor = self.predictor.get_input_tensor(name)
if "{}.lod".format(name) in feed:
input_tensor.set_lod([feed["{}.lod".format(name)]])
if batch == False:
input_tensor.copy_from_cpu(feed[name][np.newaxis, :])
else:
input_tensor.copy_from_cpu(feed[name])
output_tensors = []
output_names = self.predictor.get_output_names()
for output_name in output_names:
output_tensor = self.predictor.get_output_tensor(output_name)
output_tensors.append(output_tensor)
outputs = []
self.predictor.zero_copy_run()
for output_tensor in output_tensors:
output = output_tensor.copy_to_cpu()
outputs.append(output)
fetch_map = {}
for i, name in enumerate(fetch):
fetch_map[name] = outputs[i]
if len(output_tensors[i].lod()) > 0:
fetch_map[name + ".lod"] = np.array(output_tensors[i].lod()[
0]).astype('int32')
return fetch_map
| 38.092105 | 81 | 0.595855 |
import os
import google.protobuf.text_format
import numpy as np
import argparse
import paddle.fluid as fluid
import paddle.inference as inference
from .proto import general_model_config_pb2 as m_config
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor
import logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
class LocalPredictor(object):
def __init__(self):
self.feed_names_ = []
self.fetch_names_ = []
self.feed_types_ = {}
self.fetch_types_ = {}
self.feed_shapes_ = {}
self.feed_names_to_idx_ = {}
self.fetch_names_to_idx_ = {}
self.fetch_names_to_type_ = {}
def load_model_config(self,
model_path,
use_gpu=False,
gpu_id=0,
use_profile=False,
thread_num=1,
mem_optim=True,
ir_optim=False,
use_trt=False,
use_lite=False,
use_xpu=False,
use_feed_fetch_ops=False):
client_config = "{}/serving_server_conf.prototxt".format(model_path)
model_conf = m_config.GeneralModelConfig()
f = open(client_config, 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
config = AnalysisConfig(model_path)
logger.info("load_model_config params: model_path:{}, use_gpu:{},\
gpu_id:{}, use_profile:{}, thread_num:{}, mem_optim:{}, ir_optim:{},\
use_trt:{}, use_lite:{}, use_xpu: {}, use_feed_fetch_ops:{}".format(
model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim,
ir_optim, use_trt, use_lite, use_xpu, use_feed_fetch_ops))
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
self.feed_names_to_idx_ = {}
self.fetch_names_to_idx_ = {}
for i, var in enumerate(model_conf.feed_var):
self.feed_names_to_idx_[var.alias_name] = i
self.feed_types_[var.alias_name] = var.feed_type
self.feed_shapes_[var.alias_name] = var.shape
for i, var in enumerate(model_conf.fetch_var):
self.fetch_names_to_idx_[var.alias_name] = i
self.fetch_names_to_type_[var.alias_name] = var.fetch_type
if use_profile:
config.enable_profile()
if mem_optim:
config.enable_memory_optim()
config.switch_ir_optim(ir_optim)
config.set_cpu_math_library_num_threads(thread_num)
config.switch_use_feed_fetch_ops(use_feed_fetch_ops)
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
if not use_gpu:
config.disable_gpu()
else:
config.enable_use_gpu(100, gpu_id)
if use_trt:
config.enable_tensorrt_engine(
workspace_size=1 << 20,
max_batch_size=32,
min_subgraph_size=3,
use_static=False,
use_calib_mode=False)
if use_lite:
config.enable_lite_engine(
precision_mode=inference.PrecisionType.Float32,
zero_copy=True,
passes_filter=[],
ops_filter=[])
if use_xpu:
config.enable_xpu(8 * 1024 * 1024)
self.predictor = create_paddle_predictor(config)
def predict(self, feed=None, fetch=None, batch=False, log_id=0):
if feed is None or fetch is None:
raise ValueError("You should specify feed and fetch for prediction")
fetch_list = []
if isinstance(fetch, str):
fetch_list = [fetch]
elif isinstance(fetch, list):
fetch_list = fetch
else:
raise ValueError("Fetch only accepts string and list of string")
feed_batch = []
if isinstance(feed, dict):
feed_batch.append(feed)
elif isinstance(feed, list):
feed_batch = feed
else:
raise ValueError("Feed only accepts dict and list of dict")
int_slot_batch = []
float_slot_batch = []
int_feed_names = []
float_feed_names = []
int_shape = []
float_shape = []
fetch_names = []
counter = 0
batch_size = len(feed_batch)
for key in fetch_list:
if key in self.fetch_names_:
fetch_names.append(key)
if len(fetch_names) == 0:
raise ValueError(
"Fetch names should not be empty or out of saved fetch list.")
return {}
input_names = self.predictor.get_input_names()
for name in input_names:
if isinstance(feed[name], list):
feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[
name])
if self.feed_types_[name] == 0:
feed[name] = feed[name].astype("int64")
elif self.feed_types_[name] == 1:
feed[name] = feed[name].astype("float32")
elif self.feed_types_[name] == 2:
feed[name] = feed[name].astype("int32")
else:
raise ValueError("local predictor receives wrong data type")
input_tensor = self.predictor.get_input_tensor(name)
if "{}.lod".format(name) in feed:
input_tensor.set_lod([feed["{}.lod".format(name)]])
if batch == False:
input_tensor.copy_from_cpu(feed[name][np.newaxis, :])
else:
input_tensor.copy_from_cpu(feed[name])
output_tensors = []
output_names = self.predictor.get_output_names()
for output_name in output_names:
output_tensor = self.predictor.get_output_tensor(output_name)
output_tensors.append(output_tensor)
outputs = []
self.predictor.zero_copy_run()
for output_tensor in output_tensors:
output = output_tensor.copy_to_cpu()
outputs.append(output)
fetch_map = {}
for i, name in enumerate(fetch):
fetch_map[name] = outputs[i]
if len(output_tensors[i].lod()) > 0:
fetch_map[name + ".lod"] = np.array(output_tensors[i].lod()[
0]).astype('int32')
return fetch_map
| true | true |
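A minimal usage sketch of the LocalPredictor class above may help, since it is documented only through its docstrings. Every name below is a placeholder rather than something taken from this file: "uci_housing_model" stands for any Paddle Serving model directory, and "x"/"price" stand for the feed and fetch alias names declared in that model's serving_server_conf.prototxt.

import numpy as np
from paddle_serving_app.local_predict import LocalPredictor

predictor = LocalPredictor()
# Placeholder path: a directory containing serving_server_conf.prototxt
# plus the saved inference model files.
predictor.load_model_config("uci_housing_model", use_gpu=False, thread_num=1)

# Placeholder feed/fetch names; substitute the alias names from your own prototxt.
feed = {"x": np.random.rand(1, 13).astype("float32")}
fetch_map = predictor.predict(feed=feed, fetch=["price"], batch=True)
print(fetch_map["price"])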
1c49f06dffa7d3da20a9bd4b1507a6abb441f68a | 98 | py | Python | src/sports_halls/apps.py | codacy-badger/hbscorez | 215e4d2617ac9be91bb9d561bbfc552349cd4781 | [
"MIT"
] | 12 | 2018-03-20T21:38:53.000Z | 2021-10-31T10:00:12.000Z | src/sports_halls/apps.py | codacy-badger/hbscorez | 215e4d2617ac9be91bb9d561bbfc552349cd4781 | [
"MIT"
] | 79 | 2018-03-18T14:26:47.000Z | 2022-03-01T15:51:40.000Z | src/sports_halls/apps.py | codacy-badger/hbscorez | 215e4d2617ac9be91bb9d561bbfc552349cd4781 | [
"MIT"
] | 4 | 2018-05-18T15:39:56.000Z | 2020-10-29T09:28:41.000Z | from django.apps import AppConfig
class SportsHallsConfig(AppConfig):
name = 'sports_halls'
| 16.333333 | 35 | 0.77551 | from django.apps import AppConfig
class SportsHallsConfig(AppConfig):
name = 'sports_halls'
| true | true |
1c49f0af27c73266029ce93f22052346da2d9b95 | 5,439 | py | Python | zerver/lib/bugdown/api_code_examples.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 4 | 2019-06-04T09:06:53.000Z | 2019-06-04T09:07:47.000Z | zerver/lib/bugdown/api_code_examples.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 10 | 2019-02-26T11:10:42.000Z | 2019-02-26T14:30:24.000Z | zerver/lib/bugdown/api_code_examples.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 1 | 2020-01-07T15:49:54.000Z | 2020-01-07T15:49:54.000Z | import re
import json
import inspect
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List
import markdown
import zerver.lib.api_test_helpers
from zerver.lib.openapi import get_openapi_fixture
MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:
start = -1
end = -1
for line in source:
match = CODE_EXAMPLE_REGEX.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
snippet.append(' print(result)')
snippet.append('\n')
source = source[end + 1:]
return extract_python_code_example(source, snippet)
def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:
method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_python_code_example(function_source_lines, [])
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('```')
return code_example
SUPPORTED_LANGUAGES = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
}
} # type: Dict[str, Any]
class APICodeExamplesGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language = match.group(2)
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
else:
text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function)
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
# We assume that if the function we're rendering starts with a slash
# it's a path in the endpoint and therefore it uses the new OpenAPI
# format.
if function.startswith('/'):
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
else:
fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function]
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('```')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:
return APICodeExamplesGenerator(**kwargs)
| 33.99375 | 116 | 0.596249 | import re
import json
import inspect
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List
import markdown
import zerver.lib.api_test_helpers
from zerver.lib.openapi import get_openapi_fixture
MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:
start = -1
end = -1
for line in source:
match = CODE_EXAMPLE_REGEX.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
snippet.append(' print(result)')
snippet.append('\n')
source = source[end + 1:]
return extract_python_code_example(source, snippet)
def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:
method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_python_code_example(function_source_lines, [])
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
code_example.append(line[4:].rstrip())
code_example.append('```')
return code_example
SUPPORTED_LANGUAGES = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
}
}
class APICodeExamplesGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language = match.group(2)
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
else:
text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function)
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
if function.startswith('/'):
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
else:
fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function]
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('```')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:
return APICodeExamplesGenerator(**kwargs)
| true | true |
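Because the macro syntax accepted by APICodeExamplesPreprocessor is only implied by MACRO_REGEXP, a small self-contained sketch of what it matches may be useful. The function name "get-profile" is an invented placeholder (real keys come from zerver.lib.api_test_helpers.TEST_FUNCTIONS or the OpenAPI spec); only the regular expression from the module above is exercised here.

import re

MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')

# One line of API documentation markdown containing the directive.
line = "{generate_code_example(python)|get-profile|example}"
match = MACRO_REGEXP.search(line)
print(match.group(2))  # language -> "python"
print(match.group(3))  # function -> "get-profile"
print(match.group(4))  # key      -> "example" (could also be "fixture")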
1c49f0b92cb9d9dcfa236360b7e8067165742279 | 15,373 | py | Python | tests/test_absorption_spectrum.py | foggie-sims/trident | c5902a066ed87dc760f620d502c3e644bf93d450 | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/test_absorption_spectrum.py | foggie-sims/trident | c5902a066ed87dc760f620d502c3e644bf93d450 | [
"BSD-3-Clause-Clear"
] | 5 | 2020-11-18T11:58:08.000Z | 2022-02-24T10:40:50.000Z | tests/test_absorption_spectrum.py | foggie-sims/trident | c5902a066ed87dc760f620d502c3e644bf93d450 | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-29T17:44:56.000Z | 2022-03-29T17:44:56.000Z | """
Unit test for the AbsorptionSpectrum analysis module
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014-2017, yt Development Team.
# Copyright (c) 2017, Trident Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import os
from yt.loaders import load
from yt.testing import \
assert_allclose_units, \
assert_almost_equal
from trident.absorption_spectrum.absorption_line import \
voigt
from trident.absorption_spectrum.absorption_spectrum import \
AbsorptionSpectrum
from trident.light_ray import \
LightRay
from trident.testing import \
answer_test_data_dir, \
assert_array_rel_equal, \
h5_answer_test, \
TempDirTest
COSMO_PLUS = os.path.join(answer_test_data_dir,
"enzo_cosmology_plus/AMRCosmology.enzo")
COSMO_PLUS_SINGLE = os.path.join(answer_test_data_dir,
"enzo_cosmology_plus/RD0009/RD0009")
GIZMO_PLUS = os.path.join(answer_test_data_dir,
"gizmo_cosmology_plus/N128L16.param")
GIZMO_PLUS_SINGLE = os.path.join(answer_test_data_dir,
"gizmo_cosmology_plus/snap_N128L16_151.hdf5")
ISO_GALAXY = os.path.join(answer_test_data_dir,
"IsolatedGalaxy/galaxy0030/galaxy0030")
FIRE = os.path.join(answer_test_data_dir,
"FIRE_M12i_ref11/snapshot_600.hdf5")
class AbsorptionSpectrumTest(TempDirTest):
@h5_answer_test(assert_array_rel_equal, decimals=13)
def test_absorption_spectrum_cosmo(self):
"""
This test generates an absorption spectrum from a compound light ray on a
grid dataset
"""
lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
lr.make_light_ray(seed=1234567,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_p0_number_density'
wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_non_cosmo(self):
"""
This test generates an absorption spectrum from a simple light ray on a
grid dataset
"""
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_non_cosmo_novpec(self):
"""
This test generates an absorption spectrum from a simple light ray on a
grid dataset
"""
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5', use_peculiar_velocity=False)
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=False)
return filename
def test_equivalent_width_conserved(self):
"""
This tests that the equivalent width of the optical depth is conserved
regardless of the bin width employed in wavelength space.
Unresolved lines should still deposit optical depth into the spectrum.
"""
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wave = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
lambda_min= 1200
lambda_max= 1300
lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
total_tau = []
for lambda_bin_width in lambda_bin_widths:
n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max,
n_lambda=n_lambda)
sp.add_line(my_label, field, wave, f_value, gamma, mass)
wavelength, flux = sp.make_spectrum('lightray.h5')
total_tau.append((lambda_bin_width * sp.tau_field).sum())
# assure that the total tau values are all within 1e-3 of each other
for tau in total_tau:
assert_almost_equal(tau, total_tau[0], 3)
def test_absorption_spectrum_fits(self):
"""
This test generates an absorption spectrum and saves it as a fits file.
"""
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_p0_number_density'
wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file='spectrum.fits',
line_list_file='lines.txt',
use_peculiar_velocity=True)
@h5_answer_test(assert_array_rel_equal, decimals=12)
def test_absorption_spectrum_cosmo_sph(self):
"""
This test generates an absorption spectrum from a compound light ray on a
particle dataset
"""
lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01)
lr.make_light_ray(seed=1234567,
fields=[('gas', 'temperature'),
('gas', 'H_p0_number_density')],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=16)
def test_absorption_spectrum_non_cosmo_sph(self):
"""
This test generates an absorption spectrum from a simple light ray on a
particle dataset
"""
ds = load(GIZMO_PLUS_SINGLE)
lr = LightRay(ds)
ray_start = ds.domain_left_edge
ray_end = ds.domain_right_edge
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=[('gas', 'temperature'),
('gas', 'H_p0_number_density')],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_with_continuum(self):
"""
This test generates an absorption spectrum from a simple light ray on a
grid dataset and adds Lyman alpha and Lyman continuum to it
"""
ds = load(ISO_GALAXY)
lr = LightRay(ds)
ray_start = ds.domain_left_edge
ray_end = ds.domain_right_edge
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700 # Angstroms
f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'Ly C'
field = 'H_p0_number_density'
wavelength = 912.323660 # Angstroms
normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
def test_absorption_spectrum_with_zero_field(self):
"""
        This test generates an absorption spectrum from a particle dataset
        in which some of the deposited fields can be zero
"""
ds = load(FIRE)
lr = LightRay(ds)
# Define species and associated parameters to add to continuum
# Parameters used for both adding the transition to the spectrum
# and for fitting
# Note that for single species that produce multiple lines
# (as in the OVI doublet), 'numLines' will be equal to the number
# of lines, and f,gamma, and wavelength will have multiple values.
HI_parameters = {
'name': 'HI',
'field': 'H_p0_number_density',
'f': [.4164],
'Gamma': [6.265E8],
'wavelength': [1215.67],
'mass': 1.00794,
'numLines': 1,
'maxN': 1E22, 'minN': 1E11,
'maxb': 300, 'minb': 1,
'maxz': 6, 'minz': 0,
'init_b': 30,
'init_N': 1E14
}
species_dicts = {'HI': HI_parameters}
# Get all fields that need to be added to the light ray
fields = [('gas','temperature')]
for s, params in species_dicts.items():
fields.append(params['field'])
# With a single dataset, a start_position and
# end_position or trajectory must be given.
# Trajectory should be given as (r, theta, phi)
lr.make_light_ray(
start_position=ds.arr([0., 0., 0.], 'unitary'),
end_position=ds.arr([1., 1., 1.], 'unitary'),
solution_filename='test_lightraysolution.txt',
data_filename='test_lightray.h5',
fields=fields)
# Create an AbsorptionSpectrum object extending from
# lambda = 900 to lambda = 1800, with 10000 pixels
sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
# Iterate over species
for s, params in species_dicts.items():
# Iterate over transitions for a single species
for i in range(params['numLines']):
# Add the lines to the spectrum
sp.add_line(
s, params['field'],
params['wavelength'][i], params['f'][i],
params['Gamma'][i], params['mass'],
label_threshold=1.e10)
# Make and save spectrum
wavelength, flux = sp.make_spectrum(
'test_lightray.h5',
output_file='test_spectrum.h5',
line_list_file='test_lines.txt',
use_peculiar_velocity=True)
| 36.34279 | 83 | 0.564366 |
import numpy as np
import os
from yt.loaders import load
from yt.testing import \
assert_allclose_units, \
assert_almost_equal
from trident.absorption_spectrum.absorption_line import \
voigt
from trident.absorption_spectrum.absorption_spectrum import \
AbsorptionSpectrum
from trident.light_ray import \
LightRay
from trident.testing import \
answer_test_data_dir, \
assert_array_rel_equal, \
h5_answer_test, \
TempDirTest
COSMO_PLUS = os.path.join(answer_test_data_dir,
"enzo_cosmology_plus/AMRCosmology.enzo")
COSMO_PLUS_SINGLE = os.path.join(answer_test_data_dir,
"enzo_cosmology_plus/RD0009/RD0009")
GIZMO_PLUS = os.path.join(answer_test_data_dir,
"gizmo_cosmology_plus/N128L16.param")
GIZMO_PLUS_SINGLE = os.path.join(answer_test_data_dir,
"gizmo_cosmology_plus/snap_N128L16_151.hdf5")
ISO_GALAXY = os.path.join(answer_test_data_dir,
"IsolatedGalaxy/galaxy0030/galaxy0030")
FIRE = os.path.join(answer_test_data_dir,
"FIRE_M12i_ref11/snapshot_600.hdf5")
class AbsorptionSpectrumTest(TempDirTest):
@h5_answer_test(assert_array_rel_equal, decimals=13)
def test_absorption_spectrum_cosmo(self):
lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
lr.make_light_ray(seed=1234567,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 912.323660
        normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_non_cosmo(self):
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_non_cosmo_novpec(self):
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5', use_peculiar_velocity=False)
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=False)
return filename
def test_equivalent_width_conserved(self):
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wave = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
lambda_min= 1200
lambda_max= 1300
lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
total_tau = []
for lambda_bin_width in lambda_bin_widths:
n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max,
n_lambda=n_lambda)
sp.add_line(my_label, field, wave, f_value, gamma, mass)
wavelength, flux = sp.make_spectrum('lightray.h5')
total_tau.append((lambda_bin_width * sp.tau_field).sum())
for tau in total_tau:
assert_almost_equal(tau, total_tau[0], 3)
def test_absorption_spectrum_fits(self):
lr = LightRay(COSMO_PLUS_SINGLE)
ray_start = [0,0,0]
ray_end = [1,1,1]
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 912.323660
        normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file='spectrum.fits',
line_list_file='lines.txt',
use_peculiar_velocity=True)
@h5_answer_test(assert_array_rel_equal, decimals=12)
def test_absorption_spectrum_cosmo_sph(self):
lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01)
lr.make_light_ray(seed=1234567,
fields=[('gas', 'temperature'),
('gas', 'H_p0_number_density')],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
        wavelength = 912.323660
        normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=16)
def test_absorption_spectrum_non_cosmo_sph(self):
ds = load(GIZMO_PLUS_SINGLE)
lr = LightRay(ds)
ray_start = ds.domain_left_edge
ray_end = ds.domain_right_edge
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=[('gas', 'temperature'),
('gas', 'H_p0_number_density')],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
my_label = 'HI Lya'
field = ('gas', 'H_p0_number_density')
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
@h5_answer_test(assert_array_rel_equal, decimals=15)
def test_absorption_spectrum_with_continuum(self):
ds = load(ISO_GALAXY)
lr = LightRay(ds)
ray_start = ds.domain_left_edge
ray_end = ds.domain_right_edge
lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_p0_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
my_label = 'HI Lya'
field = 'H_p0_number_density'
        wavelength = 1215.6700
        f_value = 4.164E-01
gamma = 6.265e+08
mass = 1.00794
sp.add_line(my_label, field, wavelength, f_value,
gamma, mass, label_threshold=1.e10)
my_label = 'Ly C'
field = 'H_p0_number_density'
        wavelength = 912.323660
        normalization = 1.6e17
index = 3.0
sp.add_continuum(my_label, field, wavelength, normalization, index)
filename = "spectrum.h5"
wavelength, flux = sp.make_spectrum('lightray.h5',
output_file=filename,
line_list_file='lines.txt',
use_peculiar_velocity=True)
return filename
def test_absorption_spectrum_with_zero_field(self):
ds = load(FIRE)
lr = LightRay(ds)
HI_parameters = {
'name': 'HI',
'field': 'H_p0_number_density',
'f': [.4164],
'Gamma': [6.265E8],
'wavelength': [1215.67],
'mass': 1.00794,
'numLines': 1,
'maxN': 1E22, 'minN': 1E11,
'maxb': 300, 'minb': 1,
'maxz': 6, 'minz': 0,
'init_b': 30,
'init_N': 1E14
}
species_dicts = {'HI': HI_parameters}
fields = [('gas','temperature')]
for s, params in species_dicts.items():
fields.append(params['field'])
lr.make_light_ray(
start_position=ds.arr([0., 0., 0.], 'unitary'),
end_position=ds.arr([1., 1., 1.], 'unitary'),
solution_filename='test_lightraysolution.txt',
data_filename='test_lightray.h5',
fields=fields)
sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
for s, params in species_dicts.items():
for i in range(params['numLines']):
sp.add_line(
s, params['field'],
params['wavelength'][i], params['f'][i],
params['Gamma'][i], params['mass'],
label_threshold=1.e10)
wavelength, flux = sp.make_spectrum(
'test_lightray.h5',
output_file='test_spectrum.h5',
line_list_file='test_lines.txt',
use_peculiar_velocity=True)
| true | true |
1c49f0f3bf54c9438b56d4e5b82e5dc16b9dd8e7 | 4,982 | py | Python | tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py | NunoEdgarGFlowHub/PyBaMM | 4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py | NunoEdgarGFlowHub/PyBaMM | 4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py | NunoEdgarGFlowHub/PyBaMM | 4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190 | [
"BSD-3-Clause"
] | null | null | null | #
# Tests for the lithium-ion DFN model
#
import pybamm
import unittest
class TestDFN(unittest.TestCase):
def test_well_posed(self):
options = {"thermal": "isothermal"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_2plus1D(self):
options = {"current collector": "potential pair", "dimensionality": 1}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
options = {"current collector": "potential pair", "dimensionality": 2}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
options = {"bc_options": {"dimensionality": 5}}
with self.assertRaises(pybamm.OptionError):
model = pybamm.lithium_ion.DFN(options)
def test_lumped_thermal_model_1D(self):
options = {"thermal": "x-lumped"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_x_full_thermal_model(self):
options = {"thermal": "x-full"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_x_full_Nplus1D_not_implemented(self):
# 1plus1D
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "x-full",
}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.DFN(options)
# 2plus1D
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "x-full",
}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.DFN(options)
def test_lumped_thermal_1plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_lumped_thermal_2plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_thermal_1plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "x-lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_thermal_2plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "x-lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_particle_fast_diffusion(self):
options = {"particle": "fast diffusion"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_surface_form_differential(self):
options = {"surface form": "differential"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_surface_form_algebraic(self):
options = {"surface form": "algebraic"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
class TestDFNWithSEI(unittest.TestCase):
def test_well_posed_constant(self):
options = {"sei": "constant"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_reaction_limited(self):
options = {"sei": "reaction limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_reaction_limited_average_film_resistance(self):
options = {"sei": "reaction limited", "sei film resistance": "average"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_solvent_diffusion_limited(self):
options = {"sei": "solvent-diffusion limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_electron_migration_limited(self):
options = {"sei": "electron-migration limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_interstitial_diffusion_limited(self):
options = {"sei": "interstitial-diffusion limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_ec_reaction_limited(self):
options = {"sei": "ec reaction limited", "sei porosity change": True}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| 32.776316 | 79 | 0.633681 | import pybamm
import unittest
class TestDFN(unittest.TestCase):
def test_well_posed(self):
options = {"thermal": "isothermal"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_2plus1D(self):
options = {"current collector": "potential pair", "dimensionality": 1}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
options = {"current collector": "potential pair", "dimensionality": 2}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
options = {"bc_options": {"dimensionality": 5}}
with self.assertRaises(pybamm.OptionError):
model = pybamm.lithium_ion.DFN(options)
def test_lumped_thermal_model_1D(self):
options = {"thermal": "x-lumped"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_x_full_thermal_model(self):
options = {"thermal": "x-full"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_x_full_Nplus1D_not_implemented(self):
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "x-full",
}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.DFN(options)
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "x-full",
}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.DFN(options)
def test_lumped_thermal_1plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_lumped_thermal_2plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_thermal_1plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 1,
"thermal": "x-lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_thermal_2plus1D(self):
options = {
"current collector": "potential pair",
"dimensionality": 2,
"thermal": "x-lumped",
}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_particle_fast_diffusion(self):
options = {"particle": "fast diffusion"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_surface_form_differential(self):
options = {"surface form": "differential"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_surface_form_algebraic(self):
options = {"surface form": "algebraic"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
class TestDFNWithSEI(unittest.TestCase):
def test_well_posed_constant(self):
options = {"sei": "constant"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_reaction_limited(self):
options = {"sei": "reaction limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_reaction_limited_average_film_resistance(self):
options = {"sei": "reaction limited", "sei film resistance": "average"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_solvent_diffusion_limited(self):
options = {"sei": "solvent-diffusion limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_electron_migration_limited(self):
options = {"sei": "electron-migration limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_interstitial_diffusion_limited(self):
options = {"sei": "interstitial-diffusion limited"}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
def test_well_posed_ec_reaction_limited(self):
options = {"sei": "ec reaction limited", "sei porosity change": True}
model = pybamm.lithium_ion.DFN(options)
model.check_well_posedness()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| true | true |
1c49f156253c1ee5a0762a2795736557c9bddbfb | 3,641 | py | Python | engine/account/forms.py | NamoxLabs/BlogEngine | 741549e78b58bbc857e9dcecd88034de49d73304 | [
"BSD-3-Clause"
] | 1 | 2018-12-28T04:57:41.000Z | 2018-12-28T04:57:41.000Z | engine/account/forms.py | NamoxLabs/BlogEngine | 741549e78b58bbc857e9dcecd88034de49d73304 | [
"BSD-3-Clause"
] | null | null | null | engine/account/forms.py | NamoxLabs/BlogEngine | 741549e78b58bbc857e9dcecd88034de49d73304 | [
"BSD-3-Clause"
] | 2 | 2019-01-25T04:34:55.000Z | 2020-04-11T09:01:24.000Z | #from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.contrib.auth import forms as django_forms, update_session_auth_hash
from django.utils.translation import pgettext, pgettext_lazy
#from . import models(User)
from . import models
"""
class FormWithReCaptcha(forms.BaseForm):
def __new__(cls, *args, **kwargs):
if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:
            # insert a Google reCaptcha field inside the form
            # note: label is empty, the reCaptcha is self-explanatory, making
            # the form simpler for the user.
cls.base_fields['_captcha'] = ReCaptchaField(label='')
return super(FormWithReCaptcha, cls).__new__(cls)
"""
class ChangePasswordForm(django_forms.PasswordChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['new_password1'].user = self.user
self.fields['old_password'].widget.attrs['placeholder'] = ''
self.fields['new_password1'].widget.attrs['placeholder'] = ''
del self.fields['new_password2']
def logout_on_password_change(request, user):
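    # Presumably: keep the user's session valid after a password change unless
    # the project explicitly opts into logging the user out via the
    # LOGOUT_ON_PASSWORD_CHANGE setting.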
if (update_session_auth_hash is not None and
not settings.LOGOUT_ON_PASSWORD_CHANGE):
update_session_auth_hash(request, user)
#class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):
class LoginForm(django_forms.AuthenticationForm):
username = forms.EmailField(
label=pgettext('Form field', 'Email'), max_length=75)
def __init__(self, request=None, *args, **kwargs):
super().__init__(request=request, *args, **kwargs)
if request:
email = request.GET.get('email')
if email:
self.fields['username'].initial = email
#class SignupForm(forms.ModelForm, FormWithReCaptcha):
class SignupForm(forms.ModelForm):
password = forms.CharField(
widget=forms.PasswordInput,
label=pgettext('Password', 'Password'))
email = forms.EmailField(
label=pgettext('Email', 'Email'),
error_messages={
'unique': pgettext_lazy(
'Registration error',
'This email has already been registered.'
)})
class Meta:
model = models.User
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
{'autofocus': ''})
def save(self, request=None, commit=True):
user = super().save(commit=False)
password = self.cleaned_data['password']
user.set_password(password)
if commit:
user.save()
return user
#class PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):
class PasswordResetForm(django_forms.PasswordResetForm):
"""Allow resetting password.
This subclass overrides sending emails to use templated email.
"""
def get_users(self, email):
active_users = models.User.objects.filter(email__iexact=email, is_active=True)
return active_users
def send_mail(
self, subject_template_name, email_template_name, context,
from_email, to_email, html_email_template_name=None):
# Passing the user object to the Celery task throws an
# error "'User' is not JSON serializable". Since it's not used in our
# template, we remove it from the context.
del context['user']
#emails.send_password_reset_email.delay(context, to_email)
| 36.777778 | 86 | 0.675639 | from django import forms
from django.conf import settings
from django.contrib.auth import forms as django_forms, update_session_auth_hash
from django.utils.translation import pgettext, pgettext_lazy
from . import models
class ChangePasswordForm(django_forms.PasswordChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['new_password1'].user = self.user
self.fields['old_password'].widget.attrs['placeholder'] = ''
self.fields['new_password1'].widget.attrs['placeholder'] = ''
del self.fields['new_password2']
def logout_on_password_change(request, user):
if (update_session_auth_hash is not None and
not settings.LOGOUT_ON_PASSWORD_CHANGE):
update_session_auth_hash(request, user)
class LoginForm(django_forms.AuthenticationForm):
username = forms.EmailField(
label=pgettext('Form field', 'Email'), max_length=75)
def __init__(self, request=None, *args, **kwargs):
super().__init__(request=request, *args, **kwargs)
if request:
email = request.GET.get('email')
if email:
self.fields['username'].initial = email
class SignupForm(forms.ModelForm):
password = forms.CharField(
widget=forms.PasswordInput,
label=pgettext('Password', 'Password'))
email = forms.EmailField(
label=pgettext('Email', 'Email'),
error_messages={
'unique': pgettext_lazy(
'Registration error',
'This email has already been registered.'
)})
class Meta:
model = models.User
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
{'autofocus': ''})
def save(self, request=None, commit=True):
user = super().save(commit=False)
password = self.cleaned_data['password']
user.set_password(password)
if commit:
user.save()
return user
class PasswordResetForm(django_forms.PasswordResetForm):
def get_users(self, email):
active_users = models.User.objects.filter(email__iexact=email, is_active=True)
return active_users
def send_mail(
self, subject_template_name, email_template_name, context,
from_email, to_email, html_email_template_name=None):
# template, we remove it from the context.
del context['user']
#emails.send_password_reset_email.delay(context, to_email)
| true | true |
1c49f162a350f2ebbca239c540dffc96e43e2bae | 2,574 | py | Python | src/pages/gallery/awesome_panel_express_tests/test_markdown.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | src/pages/gallery/awesome_panel_express_tests/test_markdown.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | src/pages/gallery/awesome_panel_express_tests/test_markdown.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | """In this module we test the `Markdown` functionality of `awesome_panel.express`
The `Markdown` functionality of Panel is limited as it does not support
- One liners for using Markdown from files
- Code blocks
- Indented Markdown text as is often what is used in Editors like VS Code.
Please note you need to run `Code.extend()` in order to add the CODE_HILITE CSS to the app.
"""
import pathlib
import panel as pn
import awesome_panel.express as pnx
from awesome_panel.express.testing import TestApp
TEST_MD_FILE = pathlib.Path(__file__).parent / "data" / "test.md"
pnx.Code.extend()
def test_markdown():
"""We test that
- A "Header is shown"
- The background is blue
- The sizing_mode is "stretch_width" by default. DOES NOT WORK CURRENTLY
"""
return TestApp(
test_markdown,
pnx.Markdown("# Header", name="basic", background="lightblue"),
sizing_mode="stretch_width",
background="lightgray",
max_width=600,
)
def test_markdown_from_file():
"""We test that
    - A path to a markdown file can be used directly in one line
"""
return TestApp(
test_markdown_from_file,
pnx.Markdown(path=TEST_MD_FILE, name="file", background="lightblue"),
)
def test_markdown_indendation():
"""We test the Markdown pane
    - can handle leading spaces, i.e. this line shows as a bulleted list and not in mono-space
"""
return TestApp(test_markdown_indendation, sizing_mode="stretch_width",)
def test_markdown_code_block():
"""We test that
    - Code blocks are supported. Sort of. BUT THE INDENTATION IS CURRENTLY LOST!
    - Indented markdown text from editors is supported. The built-in Panel Markdown pane does not support this.
"""
code_block = """
This is not indented
```python
print("Hello Awesome Panel World")
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
```
This is indented```
"""
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
)
def view() -> pn.Column:
"""Wraps all tests in a Column that can be included in the Gallery or served independently
Returns:
pn.Column -- An Column containing all the tests
"""
return pn.Column(
pnx.Markdown(__doc__),
test_markdown,
test_markdown_from_file,
test_markdown_indendation,
test_markdown_code_block,
)
if __name__.startswith("bk"):
view().servable("test_markdown")
| 25.74 | 97 | 0.688423 | import pathlib
import panel as pn
import awesome_panel.express as pnx
from awesome_panel.express.testing import TestApp
TEST_MD_FILE = pathlib.Path(__file__).parent / "data" / "test.md"
pnx.Code.extend()
def test_markdown():
return TestApp(
test_markdown,
pnx.Markdown("# Header", name="basic", background="lightblue"),
sizing_mode="stretch_width",
background="lightgray",
max_width=600,
)
def test_markdown_from_file():
return TestApp(
test_markdown_from_file,
pnx.Markdown(path=TEST_MD_FILE, name="file", background="lightblue"),
)
def test_markdown_indendation():
return TestApp(test_markdown_indendation, sizing_mode="stretch_width",)
def test_markdown_code_block():
code_block = """
This is not indented
```python
print("Hello Awesome Panel World")
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
```
This is indented```
"""
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
)
def view() -> pn.Column:
return pn.Column(
pnx.Markdown(__doc__),
test_markdown,
test_markdown_from_file,
test_markdown_indendation,
test_markdown_code_block,
)
if __name__.startswith("bk"):
view().servable("test_markdown")
| true | true |
1c49f2142861df2d045a482002a519a24cbcd848 | 2,022 | py | Python | tests/nuodb_crypt_test.py | jgetto/nuodb-python | 3a22260e801d8f9d9bd33f911a694e8caeba7282 | [
"BSD-3-Clause"
] | null | null | null | tests/nuodb_crypt_test.py | jgetto/nuodb-python | 3a22260e801d8f9d9bd33f911a694e8caeba7282 | [
"BSD-3-Clause"
] | null | null | null | tests/nuodb_crypt_test.py | jgetto/nuodb-python | 3a22260e801d8f9d9bd33f911a694e8caeba7282 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import pynuodb
import unittest
from nuodb_base import NuoBase
class NuoDBBasicTest(unittest.TestCase):
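    # From the assertions below, toSignedByteString/fromSignedByteString appear to
    # encode integers as minimal big-endian two's-complement byte strings: positive
    # values that would set the high bit gain a leading 0x00 byte (255 -> 00FF),
    # and negative values use the two's-complement form (-256 -> FF00).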
def test_toByteString(self):
self.assertEqual(pynuodb.crypt.toSignedByteString(1), '01'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(127), '7F'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(254), '00FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(255), '00FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-1), 'FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-2), 'FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-256), 'FF00'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-258), 'FEFE'.decode('hex'))
def test_fromByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString('01'.decode('hex')), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('00FF'.decode('hex')), 255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF'.decode('hex')), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF01'.decode('hex')), -255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF00'.decode('hex')), -256)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FEFE'.decode('hex')), -258)
def test_bothByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(1)), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(0)), 0)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-1)), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(256)), 256)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-256)), -256)
if __name__ == '__main__':
unittest.main() | 54.648649 | 106 | 0.720574 |
import pynuodb
import unittest
from nuodb_base import NuoBase
class NuoDBBasicTest(unittest.TestCase):
def test_toByteString(self):
self.assertEqual(pynuodb.crypt.toSignedByteString(1), '01'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(127), '7F'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(254), '00FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(255), '00FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-1), 'FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-2), 'FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-256), 'FF00'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-258), 'FEFE'.decode('hex'))
def test_fromByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString('01'.decode('hex')), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('00FF'.decode('hex')), 255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF'.decode('hex')), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF01'.decode('hex')), -255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF00'.decode('hex')), -256)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FEFE'.decode('hex')), -258)
def test_bothByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(1)), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(0)), 0)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-1)), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(256)), 256)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-256)), -256)
if __name__ == '__main__':
unittest.main() | true | true |
1c49f2b7d071ce1ebf896fca28a137a5869de6d1 | 4,909 | py | Python | DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py | vjmastra/cmssw | de96df37dbaf3543daef67339179e074bde9e858 | [
"Apache-2.0"
] | 1 | 2019-02-06T13:19:54.000Z | 2019-02-06T13:19:54.000Z | DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py | dellaric/cmssw | cd7470dc554972076740dde7523f311c43f248d3 | [
"Apache-2.0"
] | null | null | null | DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py | dellaric/cmssw | cd7470dc554972076740dde7523f311c43f248d3 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
from DQMServices.Components.DQMMessageLoggerClient_cff import *
from DQMServices.Components.DQMFastTimerServiceClient_cfi import *
from DQMOffline.Ecal.ecal_dqm_client_offline_cosmic_cff import *
from DQM.EcalPreshowerMonitorClient.es_dqm_client_offline_cosmic_cff import *
from DQM.HcalTasks.OfflineHarvestingSequence_cosmic import *
from DQM.SiStripMonitorClient.SiStripClientConfig_Tier0_Cosmic_cff import *
from DQM.SiPixelCommon.SiPixelOfflineDQM_client_cff import *
from DQM.DTMonitorClient.dtDQMOfflineClients_Cosmics_cff import *
from DQM.RPCMonitorClient.RPCTier0Client_cff import *
from DQM.CSCMonitorModule.csc_dqm_offlineclient_cosmics_cff import *
from DQM.GEM.gem_dqm_offline_client_cosmics_cff import *
from DQMServices.Components.DQMFEDIntegrityClient_cff import *
DQMNone = cms.Sequence()
DQMOfflineCosmics_SecondStepEcal = cms.Sequence( ecal_dqm_client_offline *
es_dqm_client_offline )
DQMOfflineCosmics_SecondStepHcal = cms.Sequence( hcalOfflineHarvesting )
DQMOfflineCosmics_SecondStepTrackerStrip = cms.Sequence( SiStripCosmicDQMClient )
DQMOfflineCosmics_SecondStepTrackerPixel = cms.Sequence( PixelOfflineDQMClientNoDataCertification_cosmics )
DQMOfflineCosmics_SecondStepMuonDPG = cms.Sequence( dtClientsCosmics *
rpcTier0Client *
cscOfflineCosmicsClients )
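# When the run3_GEM era modifier is active, swap in a copy of the muon DPG
# client sequence extended with the GEM cosmics clients (lines below).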
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
_run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG = DQMOfflineCosmics_SecondStepMuonDPG.copy()
_run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG += gemClientsCosmics
run3_GEM.toReplaceWith(DQMOfflineCosmics_SecondStepMuonDPG, _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG)
DQMOfflineCosmics_SecondStepFED = cms.Sequence( dqmFEDIntegrityClient )
DQMOfflineCosmics_SecondStep_PreDPG = cms.Sequence(
DQMOfflineCosmics_SecondStepEcal *
DQMOfflineCosmics_SecondStepHcal *
DQMOfflineCosmics_SecondStepTrackerStrip *
DQMOfflineCosmics_SecondStepTrackerPixel *
DQMOfflineCosmics_SecondStepMuonDPG *
DQMOfflineCosmics_SecondStepFED )
DQMOfflineCosmics_SecondStepDPG = cms.Sequence(
DQMOfflineCosmics_SecondStep_PreDPG *
DQMMessageLoggerClientSeq )
from DQM.TrackingMonitorClient.TrackingClientConfig_Tier0_Cosmic_cff import *
from DQMOffline.Muon.muonQualityTests_cff import *
from DQMOffline.EGamma.photonOfflineDQMClient_cff import *
from DQMOffline.L1Trigger.L1TriggerDqmOffline_cff import *
from DQMOffline.Trigger.DQMOffline_Trigger_Client_cff import *
from DQMOffline.Trigger.DQMOffline_HLT_Client_cff import *
from DQMOffline.JetMET.SusyPostProcessor_cff import *
DQMOfflineCosmics_SecondStepTracking = cms.Sequence( TrackingCosmicDQMClient )
DQMOfflineCosmics_SecondStepMUO = cms.Sequence( cosmicMuonQualityTests )
DQMOfflineCosmics_SecondStepEGamma = cms.Sequence( photonOfflineDQMClient )
DQMOfflineCosmics_SecondStepL1T = cms.Sequence( l1TriggerDqmOfflineCosmicsClient )
DQMOfflineCosmics_SecondStepTrigger = cms.Sequence( triggerOfflineDQMClient *
hltOfflineDQMClient )
DQMOfflineCosmics_SecondStepJetMET = cms.Sequence( SusyPostProcessorSequence )
DQMOfflineCosmics_SecondStep_PrePOG = cms.Sequence( DQMOfflineCosmics_SecondStepTracking *
DQMOfflineCosmics_SecondStepMUO *
DQMOfflineCosmics_SecondStepEGamma *
DQMOfflineCosmics_SecondStepL1T *
DQMOfflineCosmics_SecondStepJetMET
)
DQMOfflineCosmics_SecondStep_PrePOG.remove(fsqClient)
DQMOfflineCosmics_SecondStepPOG = cms.Sequence(
DQMOfflineCosmics_SecondStep_PrePOG *
DQMMessageLoggerClientSeq *
dqmFastTimerServiceClient)
DQMOfflineCosmics_SecondStep = cms.Sequence(
DQMOfflineCosmics_SecondStep_PreDPG *
DQMOfflineCosmics_SecondStep_PrePOG *
DQMOfflineCosmics_SecondStepTrigger *
DQMMessageLoggerClientSeq )
DQMOfflineCosmics_SecondStep_FakeHLT = cms.Sequence(DQMOfflineCosmics_SecondStep )
DQMOfflineCosmics_SecondStep_FakeHLT.remove( DQMOfflineCosmics_SecondStepTrigger )
| 52.223404 | 107 | 0.695457 | import FWCore.ParameterSet.Config as cms
from DQMServices.Components.DQMMessageLoggerClient_cff import *
from DQMServices.Components.DQMFastTimerServiceClient_cfi import *
from DQMOffline.Ecal.ecal_dqm_client_offline_cosmic_cff import *
from DQM.EcalPreshowerMonitorClient.es_dqm_client_offline_cosmic_cff import *
from DQM.HcalTasks.OfflineHarvestingSequence_cosmic import *
from DQM.SiStripMonitorClient.SiStripClientConfig_Tier0_Cosmic_cff import *
from DQM.SiPixelCommon.SiPixelOfflineDQM_client_cff import *
from DQM.DTMonitorClient.dtDQMOfflineClients_Cosmics_cff import *
from DQM.RPCMonitorClient.RPCTier0Client_cff import *
from DQM.CSCMonitorModule.csc_dqm_offlineclient_cosmics_cff import *
from DQM.GEM.gem_dqm_offline_client_cosmics_cff import *
from DQMServices.Components.DQMFEDIntegrityClient_cff import *
DQMNone = cms.Sequence()
DQMOfflineCosmics_SecondStepEcal = cms.Sequence( ecal_dqm_client_offline *
es_dqm_client_offline )
DQMOfflineCosmics_SecondStepHcal = cms.Sequence( hcalOfflineHarvesting )
DQMOfflineCosmics_SecondStepTrackerStrip = cms.Sequence( SiStripCosmicDQMClient )
DQMOfflineCosmics_SecondStepTrackerPixel = cms.Sequence( PixelOfflineDQMClientNoDataCertification_cosmics )
DQMOfflineCosmics_SecondStepMuonDPG = cms.Sequence( dtClientsCosmics *
rpcTier0Client *
cscOfflineCosmicsClients )
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
_run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG = DQMOfflineCosmics_SecondStepMuonDPG.copy()
_run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG += gemClientsCosmics
run3_GEM.toReplaceWith(DQMOfflineCosmics_SecondStepMuonDPG, _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG)
DQMOfflineCosmics_SecondStepFED = cms.Sequence( dqmFEDIntegrityClient )
DQMOfflineCosmics_SecondStep_PreDPG = cms.Sequence(
DQMOfflineCosmics_SecondStepEcal *
DQMOfflineCosmics_SecondStepHcal *
DQMOfflineCosmics_SecondStepTrackerStrip *
DQMOfflineCosmics_SecondStepTrackerPixel *
DQMOfflineCosmics_SecondStepMuonDPG *
DQMOfflineCosmics_SecondStepFED )
DQMOfflineCosmics_SecondStepDPG = cms.Sequence(
DQMOfflineCosmics_SecondStep_PreDPG *
DQMMessageLoggerClientSeq )
from DQM.TrackingMonitorClient.TrackingClientConfig_Tier0_Cosmic_cff import *
from DQMOffline.Muon.muonQualityTests_cff import *
from DQMOffline.EGamma.photonOfflineDQMClient_cff import *
from DQMOffline.L1Trigger.L1TriggerDqmOffline_cff import *
from DQMOffline.Trigger.DQMOffline_Trigger_Client_cff import *
from DQMOffline.Trigger.DQMOffline_HLT_Client_cff import *
from DQMOffline.JetMET.SusyPostProcessor_cff import *
DQMOfflineCosmics_SecondStepTracking = cms.Sequence( TrackingCosmicDQMClient )
DQMOfflineCosmics_SecondStepMUO = cms.Sequence( cosmicMuonQualityTests )
DQMOfflineCosmics_SecondStepEGamma = cms.Sequence( photonOfflineDQMClient )
DQMOfflineCosmics_SecondStepL1T = cms.Sequence( l1TriggerDqmOfflineCosmicsClient )
DQMOfflineCosmics_SecondStepTrigger = cms.Sequence( triggerOfflineDQMClient *
hltOfflineDQMClient )
DQMOfflineCosmics_SecondStepJetMET = cms.Sequence( SusyPostProcessorSequence )
DQMOfflineCosmics_SecondStep_PrePOG = cms.Sequence( DQMOfflineCosmics_SecondStepTracking *
DQMOfflineCosmics_SecondStepMUO *
DQMOfflineCosmics_SecondStepEGamma *
DQMOfflineCosmics_SecondStepL1T *
DQMOfflineCosmics_SecondStepJetMET
)
DQMOfflineCosmics_SecondStep_PrePOG.remove(fsqClient)
DQMOfflineCosmics_SecondStepPOG = cms.Sequence(
DQMOfflineCosmics_SecondStep_PrePOG *
DQMMessageLoggerClientSeq *
dqmFastTimerServiceClient)
DQMOfflineCosmics_SecondStep = cms.Sequence(
DQMOfflineCosmics_SecondStep_PreDPG *
DQMOfflineCosmics_SecondStep_PrePOG *
DQMOfflineCosmics_SecondStepTrigger *
DQMMessageLoggerClientSeq )
DQMOfflineCosmics_SecondStep_FakeHLT = cms.Sequence(DQMOfflineCosmics_SecondStep )
DQMOfflineCosmics_SecondStep_FakeHLT.remove( DQMOfflineCosmics_SecondStepTrigger )
| true | true |
1c49f324acb2a047c20500c33a13ef6f0f53f559 | 73 | py | Python | pvpc/__init__.py | David-Lor/python-pvpc | a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba | [
"ISC"
] | null | null | null | pvpc/__init__.py | David-Lor/python-pvpc | a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba | [
"ISC"
] | null | null | null | pvpc/__init__.py | David-Lor/python-pvpc | a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba | [
"ISC"
] | null | null | null | from .models import *
from .requester import *
from .exceptions import *
| 18.25 | 25 | 0.753425 | from .models import *
from .requester import *
from .exceptions import *
| true | true |
1c49f39ec3628b8aaf020ff4bb77d86834de746f | 1,407 | py | Python | kicad_cicd/plotter.py | sillevl/pcbops_template | 68107607412245df168acdab978447ab82da33f7 | [
"MIT"
] | 10 | 2019-04-30T22:14:20.000Z | 2021-02-24T13:51:57.000Z | kicad_cicd/plotter.py | sillevl/pcbops_template | 68107607412245df168acdab978447ab82da33f7 | [
"MIT"
] | 2 | 2019-05-09T13:59:39.000Z | 2019-09-25T14:07:25.000Z | kicad_cicd/plotter.py | sillevl/pcbops_template | 68107607412245df168acdab978447ab82da33f7 | [
"MIT"
] | 3 | 2019-04-29T10:01:48.000Z | 2020-06-04T10:14:26.000Z | import sys
import os
import pcbnew
from pcbnew import *
file_name = os.path.abspath(sys.argv[1])
output_dir = os.path.abspath(sys.argv[2])
print("Running KiCAD Plotter CI/CD Script on %s output to %s"%(file_name, output_dir,))
try:
os.makedirs(output_dir)
except OSError:
pass
board = pcbnew.LoadBoard(file_name)
pctl = pcbnew.PLOT_CONTROLLER(board)
popt = pctl.GetPlotOptions()
popt.SetOutputDirectory(output_dir)
popt.SetPlotFrameRef(False)
popt.SetLineWidth(pcbnew.FromMM(0.1))
popt.SetAutoScale(False)
popt.SetScale(1)
popt.SetMirror(False)
popt.SetUseGerberAttributes(True)
popt.SetUseGerberProtelExtensions(True)
popt.SetExcludeEdgeLayer(True)
popt.SetUseAuxOrigin(False)
pctl.SetColorMode(True)
popt.SetSubtractMaskFromSilk(False)
popt.SetPlotReference(True)
popt.SetPlotValue(False)
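# Each entry below appears to be (plot-file suffix, KiCad layer id, plot sheet
# description); one Gerber file is produced per entry by the loop that follows.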
layers = [
("F.Cu", pcbnew.F_Cu, "Top layer"),
("B.Cu", pcbnew.B_Cu, "Bottom layer"),
("F.Paste", pcbnew.F_Paste, "Paste top"),
("B.Paste", pcbnew.B_Paste, "Paste bottom"),
("F.SilkS", pcbnew.F_SilkS, "Silk top"),
("B.SilkS", pcbnew.B_SilkS, "Silk top"),
("F.Mask", pcbnew.F_Mask, "Mask top"),
("B.Mask", pcbnew.B_Mask, "Mask bottom"),
("Edge.Cuts", pcbnew.Edge_Cuts, "Edges"),
]
for layer_info in layers:
pctl.SetLayer(layer_info[1])
pctl.OpenPlotfile(layer_info[0], pcbnew.PLOT_FORMAT_GERBER, layer_info[2])
pctl.PlotLayer()
pctl.ClosePlot()
| 24.684211 | 87 | 0.721393 | import sys
import os
import pcbnew
from pcbnew import *
file_name = os.path.abspath(sys.argv[1])
output_dir = os.path.abspath(sys.argv[2])
print("Running KiCAD Plotter CI/CD Script on %s output to %s"%(file_name, output_dir,))
try:
os.makedirs(output_dir)
except OSError:
pass
board = pcbnew.LoadBoard(file_name)
pctl = pcbnew.PLOT_CONTROLLER(board)
popt = pctl.GetPlotOptions()
popt.SetOutputDirectory(output_dir)
popt.SetPlotFrameRef(False)
popt.SetLineWidth(pcbnew.FromMM(0.1))
popt.SetAutoScale(False)
popt.SetScale(1)
popt.SetMirror(False)
popt.SetUseGerberAttributes(True)
popt.SetUseGerberProtelExtensions(True)
popt.SetExcludeEdgeLayer(True)
popt.SetUseAuxOrigin(False)
pctl.SetColorMode(True)
popt.SetSubtractMaskFromSilk(False)
popt.SetPlotReference(True)
popt.SetPlotValue(False)
layers = [
("F.Cu", pcbnew.F_Cu, "Top layer"),
("B.Cu", pcbnew.B_Cu, "Bottom layer"),
("F.Paste", pcbnew.F_Paste, "Paste top"),
("B.Paste", pcbnew.B_Paste, "Paste bottom"),
("F.SilkS", pcbnew.F_SilkS, "Silk top"),
("B.SilkS", pcbnew.B_SilkS, "Silk top"),
("F.Mask", pcbnew.F_Mask, "Mask top"),
("B.Mask", pcbnew.B_Mask, "Mask bottom"),
("Edge.Cuts", pcbnew.Edge_Cuts, "Edges"),
]
for layer_info in layers:
pctl.SetLayer(layer_info[1])
pctl.OpenPlotfile(layer_info[0], pcbnew.PLOT_FORMAT_GERBER, layer_info[2])
pctl.PlotLayer()
pctl.ClosePlot()
| true | true |
1c49f3bd8d21302a83182466fe1ef519c82625f3 | 49,318 | py | Python | python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py | cvb/dagster | 6c735708790febe79ffe727225a4445c033ab79d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py | cvb/dagster | 6c735708790febe79ffe727225a4445c033ab79d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py | cvb/dagster | 6c735708790febe79ffe727225a4445c033ab79d | [
"Apache-2.0"
] | null | null | null | import sys
import tempfile
from datetime import datetime
import pendulum
import pytest
from dagster import job, op, seven
from dagster.core.definitions import PipelineDefinition
from dagster.core.errors import (
DagsterRunAlreadyExists,
DagsterRunNotFoundError,
DagsterSnapshotDoesNotExist,
)
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill
from dagster.core.host_representation import (
ExternalRepositoryOrigin,
ManagedGrpcPythonEnvRepositoryLocationOrigin,
)
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.snap import create_pipeline_snapshot_id
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.pipeline_run import (
DagsterRun,
JobBucket,
PipelineRunStatus,
PipelineRunsFilter,
TagBucket,
)
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs.migration import REQUIRED_DATA_MIGRATIONS
from dagster.core.storage.runs.sql_run_storage import SqlRunStorage
from dagster.core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.core.utils import make_new_run_id
from dagster.daemon.daemon import SensorDaemon
from dagster.daemon.types import DaemonHeartbeat
from dagster.serdes import serialize_pp
from dagster.seven.compat.pendulum import create_pendulum_time, to_timezone
win_py36 = seven.IS_WINDOWS and sys.version_info[0] == 3 and sys.version_info[1] == 6
class TestRunStorage:
"""
    You can extend this class to easily run this set of tests on any run storage. When extending,
you simply need to override the `run_storage` fixture and return your implementation of
`RunStorage`.
For example:
```
class TestMyStorageImplementation(TestRunStorage):
__test__ = True
@pytest.fixture(scope='function', name='storage')
def run_storage(self): # pylint: disable=arguments-differ
return MyStorageImplementation()
```
"""
__test__ = False
@pytest.fixture(name="storage", params=[])
def run_storage(self, request):
with request.param() as s:
yield s
# Override for storages that are not allowed to delete runs
def can_delete_runs(self):
return True
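    # A hypothetical read-only storage test class could override this as:
    #
    #     def can_delete_runs(self):
    #         return False
    #
    # so that the delete/wipe tests below are skipped.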
@staticmethod
def fake_repo_target():
return ExternalRepositoryOrigin(
ManagedGrpcPythonEnvRepositoryLocationOrigin(
LoadableTargetOrigin(
executable_path=sys.executable, module_name="fake", attribute="fake"
),
),
"fake_repo_name",
)
@classmethod
def fake_partition_set_origin(cls, partition_set_name):
return cls.fake_repo_target().get_partition_set_origin(partition_set_name)
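    # These fake origins let the storage tests (e.g. the backfill tests below)
    # construct external-origin objects without starting a real repository
    # location or gRPC server.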
@staticmethod
def build_run(
run_id,
pipeline_name,
mode="default",
tags=None,
status=PipelineRunStatus.NOT_STARTED,
parent_run_id=None,
root_run_id=None,
pipeline_snapshot_id=None,
):
return DagsterRun(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=None,
mode=mode,
tags=tags,
status=status,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot_id=pipeline_snapshot_id,
)
def test_basic_storage(self, storage):
assert storage
run_id = make_new_run_id()
added = storage.add_run(
TestRunStorage.build_run(
run_id=run_id, pipeline_name="some_pipeline", tags={"foo": "bar"}
)
)
assert added
runs = storage.get_runs()
assert len(runs) == 1
run = runs[0]
assert run.run_id == run_id
assert run.pipeline_name == "some_pipeline"
assert run.tags
assert run.tags.get("foo") == "bar"
assert storage.has_run(run_id)
fetched_run = storage.get_run_by_id(run_id)
assert fetched_run.run_id == run_id
assert fetched_run.pipeline_name == "some_pipeline"
def test_clear(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
assert storage
run_id = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 1
storage.wipe()
assert list(storage.get_runs()) == []
def test_storage_telemetry(self, storage):
assert storage
storage_id = storage.get_run_storage_id()
assert isinstance(storage_id, str)
storage_id_again = storage.get_run_storage_id()
assert storage_id == storage_id_again
def test_fetch_by_pipeline(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="some_pipeline"))
storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="some_other_pipeline"))
assert len(storage.get_runs()) == 2
some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
def test_fetch_by_snapshot_id(self, storage):
assert storage
pipeline_def_a = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_def_b = PipelineDefinition(name="some_other_pipeline", solid_defs=[])
pipeline_snapshot_a = pipeline_def_a.get_pipeline_snapshot()
pipeline_snapshot_b = pipeline_def_b.get_pipeline_snapshot()
pipeline_snapshot_a_id = create_pipeline_snapshot_id(pipeline_snapshot_a)
pipeline_snapshot_b_id = create_pipeline_snapshot_id(pipeline_snapshot_b)
assert storage.add_pipeline_snapshot(pipeline_snapshot_a) == pipeline_snapshot_a_id
assert storage.add_pipeline_snapshot(pipeline_snapshot_b) == pipeline_snapshot_b_id
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
pipeline_snapshot_id=pipeline_snapshot_a_id,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_other_pipeline",
pipeline_snapshot_id=pipeline_snapshot_b_id,
)
)
assert len(storage.get_runs()) == 2
runs_a = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_a_id))
assert len(runs_a) == 1
assert runs_a[0].run_id == one
runs_b = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_b_id))
assert len(runs_b) == 1
assert runs_b[0].run_id == two
def test_add_run_tags(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="foo"))
storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="bar"))
assert storage.get_run_tags() == []
storage.add_run_tags(one, {"tag1": "val1", "tag2": "val2"})
storage.add_run_tags(two, {"tag1": "val1"})
assert storage.get_run_tags() == [("tag1", {"val1"}), ("tag2", {"val2"})]
# Adding both existing tags and a new tag
storage.add_run_tags(one, {"tag1": "val2", "tag3": "val3"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 3
assert test_run.tags["tag1"] == "val2"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert storage.get_run_tags() == [
("tag1", {"val1", "val2"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
]
# Adding only existing tags
storage.add_run_tags(one, {"tag1": "val3"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 3
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert storage.get_run_tags() == [
("tag1", {"val1", "val3"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
]
# Adding only a new tag that wasn't there before
storage.add_run_tags(one, {"tag4": "val4"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 4
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert test_run.tags["tag4"] == "val4"
assert storage.get_run_tags() == [
("tag1", {"val1", "val3"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
("tag4", {"val4"}),
]
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 4
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert test_run.tags["tag4"] == "val4"
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag3": "val3"}))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
runs_with_old_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val1"}))
assert len(runs_with_old_tag) == 1
assert runs_with_old_tag[0].tags == {"tag1": "val1"}
runs_with_new_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val3"}))
assert len(runs_with_new_tag) == 1
assert runs_with_new_tag[0].tags == {
"tag1": "val3",
"tag2": "val2",
"tag3": "val3",
"tag4": "val4",
}
def test_fetch_by_filter(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"tag": "hello", "tag2": "world"},
status=PipelineRunStatus.SUCCESS,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"tag": "hello"},
status=PipelineRunStatus.FAILURE,
),
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="other_pipeline", status=PipelineRunStatus.SUCCESS
)
)
assert len(storage.get_runs()) == 3
some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one]))
count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one]))
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
count = storage.get_runs_count(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
count = storage.get_runs_count(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == three
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello"}))
count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello"}))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"}))
count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"}))
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(
PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"})
)
count = storage.get_runs_count(
PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"})
)
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(
PipelineRunsFilter(
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
count = storage.get_runs_count(
PipelineRunsFilter(
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
# All filters
some_runs = storage.get_runs(
PipelineRunsFilter(
run_ids=[one],
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
count = storage.get_runs_count(
PipelineRunsFilter(
run_ids=[one],
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter())
count = storage.get_runs_count(PipelineRunsFilter())
assert len(some_runs) == 3
assert count == 3
def test_fetch_count_by_tag(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"mytag": "hello", "mytag2": "world"},
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"mytag": "goodbye", "mytag2": "world"},
)
)
storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 3
run_count = storage.get_runs_count(
filters=PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"})
)
assert run_count == 1
run_count = storage.get_runs_count(filters=PipelineRunsFilter(tags={"mytag2": "world"}))
assert run_count == 2
run_count = storage.get_runs_count()
assert run_count == 3
assert storage.get_run_tags() == [("mytag", {"hello", "goodbye"}), ("mytag2", {"world"})]
def test_fetch_by_tags(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"mytag": "hello", "mytag2": "world"},
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"mytag": "goodbye", "mytag2": "world"},
)
)
storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 3
some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"}))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag2": "world"}))
assert len(some_runs) == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={}))
assert len(some_runs) == 3
def test_paginated_fetch(self, storage):
assert storage
one, two, three = [make_new_run_id(), make_new_run_id(), make_new_run_id()]
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
all_runs = storage.get_runs()
assert len(all_runs) == 3
sliced_runs = storage.get_runs(cursor=three, limit=1)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
all_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(all_runs) == 3
sliced_runs = storage.get_runs(
PipelineRunsFilter(pipeline_name="some_pipeline"), cursor=three, limit=1
)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
all_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello"}))
assert len(all_runs) == 3
sliced_runs = storage.get_runs(
PipelineRunsFilter(tags={"mytag": "hello"}), cursor=three, limit=1
)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
def test_fetch_by_status(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
four = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE
)
)
assert {
run.run_id
for run in storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.NOT_STARTED])
)
} == {one}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]))
} == {
two,
three,
}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE]))
} == {four}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
} == set()
def test_fetch_records_by_update_timestamp(self, storage):
assert storage
self._skip_in_memory(storage)
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.handle_run_event(
three, # three succeeds
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="some_pipeline",
),
)
storage.handle_run_event(
            one,  # fail one after two has failed and three has succeeded
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_FAILURE.value,
pipeline_name="some_pipeline",
),
)
record_two = storage.get_run_records(
filters=PipelineRunsFilter(run_ids=[two], updated_after=datetime(2020, 1, 1))
)[0]
run_two_update_timestamp = record_two.update_timestamp
assert [
record.pipeline_run.run_id
for record in storage.get_run_records(
filters=PipelineRunsFilter(updated_after=run_two_update_timestamp),
order_by="update_timestamp",
ascending=True,
)
] == [three, one]
assert [
record.pipeline_run.run_id
for record in storage.get_run_records(
filters=PipelineRunsFilter(
statuses=[PipelineRunStatus.FAILURE], updated_after=run_two_update_timestamp
),
)
] == [one]
def test_fetch_by_status_cursored(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
four = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
cursor_four_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four
)
assert len(cursor_four_runs) == 2
assert {run.run_id for run in cursor_four_runs} == {one, two}
cursor_two_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=two
)
assert len(cursor_two_runs) == 1
assert {run.run_id for run in cursor_two_runs} == {one}
cursor_one_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=one
)
assert not cursor_one_runs
cursor_four_limit_one = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four, limit=1
)
assert len(cursor_four_limit_one) == 1
assert cursor_four_limit_one[0].run_id == two
def test_delete(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete runs")
assert storage
run_id = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 1
storage.delete_run(run_id)
assert list(storage.get_runs()) == []
def test_delete_with_tags(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete runs")
assert storage
run_id = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=run_id,
pipeline_name="some_pipeline",
tags={run_id: run_id},
)
)
assert len(storage.get_runs()) == 1
assert run_id in [key for key, value in storage.get_run_tags()]
storage.delete_run(run_id)
assert list(storage.get_runs()) == []
assert run_id not in [key for key, value in storage.get_run_tags()]
def test_wipe_tags(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
run_id = "some_run_id"
run = DagsterRun(run_id=run_id, pipeline_name="a_pipeline", tags={"foo": "bar"})
storage.add_run(run)
assert storage.get_run_by_id(run_id) == run
assert dict(storage.get_run_tags()) == {"foo": {"bar"}}
storage.wipe()
assert list(storage.get_runs()) == []
assert dict(storage.get_run_tags()) == {}
def test_write_conflicting_run_id(self, storage):
double_run_id = "double_run_id"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
run = DagsterRun(run_id=double_run_id, pipeline_name=pipeline_def.name)
assert storage.add_run(run)
with pytest.raises(DagsterRunAlreadyExists):
storage.add_run(run)
def test_add_get_snapshot(self, storage):
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id
fetched_pipeline_snapshot = storage.get_pipeline_snapshot(pipeline_snapshot_id)
assert fetched_pipeline_snapshot
assert serialize_pp(fetched_pipeline_snapshot) == serialize_pp(pipeline_snapshot)
assert storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert not storage.has_pipeline_snapshot("nope")
if self.can_delete_runs():
storage.wipe()
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
def test_single_write_read_with_snapshot(self, storage):
run_with_snapshot_id = "lkasjdflkjasdf"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
run_with_snapshot = DagsterRun(
run_id=run_with_snapshot_id,
pipeline_name=pipeline_def.name,
pipeline_snapshot_id=pipeline_snapshot_id,
)
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id
assert serialize_pp(storage.get_pipeline_snapshot(pipeline_snapshot_id)) == serialize_pp(
pipeline_snapshot
)
storage.add_run(run_with_snapshot)
assert storage.get_run_by_id(run_with_snapshot_id) == run_with_snapshot
if self.can_delete_runs():
storage.wipe()
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert not storage.has_run(run_with_snapshot_id)
def test_single_write_with_missing_snapshot(self, storage):
run_with_snapshot_id = "lkasjdflkjasdf"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
run_with_missing_snapshot = DagsterRun(
run_id=run_with_snapshot_id,
pipeline_name=pipeline_def.name,
pipeline_snapshot_id="nope",
)
with pytest.raises(DagsterSnapshotDoesNotExist):
storage.add_run(run_with_missing_snapshot)
def test_add_get_execution_snapshot(self, storage):
from dagster.core.execution.api import create_execution_plan
from dagster.core.snap import snapshot_from_execution_plan
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
execution_plan = create_execution_plan(pipeline_def)
ep_snapshot = snapshot_from_execution_plan(
execution_plan, pipeline_def.get_pipeline_snapshot_id()
)
snapshot_id = storage.add_execution_plan_snapshot(ep_snapshot)
fetched_ep_snapshot = storage.get_execution_plan_snapshot(snapshot_id)
assert fetched_ep_snapshot
assert serialize_pp(fetched_ep_snapshot) == serialize_pp(ep_snapshot)
assert storage.has_execution_plan_snapshot(snapshot_id)
assert not storage.has_execution_plan_snapshot("nope")
if self.can_delete_runs():
storage.wipe()
assert not storage.has_execution_plan_snapshot(snapshot_id)
def test_fetch_run_filter(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
status=PipelineRunStatus.SUCCESS,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
status=PipelineRunStatus.SUCCESS,
),
)
assert len(storage.get_runs()) == 2
some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one, two]))
count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one, two]))
assert len(some_runs) == 2
assert count == 2
def test_fetch_run_group(self, storage):
assert storage
root_run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
runs = [root_run]
# Create 3 children and 3 descendants of the rightmost child:
# root
# / | \
# [0] [1] [2]
# |
# [a]
# |
# [b]
# |
# [c]
for _ in range(3):
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
root_run_id=root_run.run_id,
parent_run_id=root_run.run_id,
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for _ in range(3):
            # get the root run id from the previous run if it exists, otherwise use the previous run's id
root_run_id = runs[-1].root_run_id if runs[-1].root_run_id else runs[-1].run_id
parent_run_id = runs[-1].run_id
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
root_run_id=root_run_id,
parent_run_id=parent_run_id,
tags={PARENT_RUN_ID_TAG: parent_run_id, ROOT_RUN_ID_TAG: root_run_id},
)
)
for run in runs:
storage.add_run(run)
run_group_one = storage.get_run_group(root_run.run_id)
assert len(run_group_one[1]) == 7
run_group_two = storage.get_run_group(runs[-1].run_id)
assert len(run_group_two[1]) == 7
assert run_group_one[0] == run_group_two[0]
assert run_group_one[1] == run_group_two[1]
def test_fetch_run_group_not_found(self, storage):
assert storage
run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
storage.add_run(run)
with pytest.raises(DagsterRunNotFoundError):
storage.get_run_group(make_new_run_id())
def test_fetch_run_groups(self, storage):
assert storage
root_runs = [
TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
for i in range(3)
]
runs = [run for run in root_runs]
for _ in range(5):
for root_run in root_runs:
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for run in runs:
storage.add_run(run)
run_groups = storage.get_run_groups(limit=5)
assert len(run_groups) == 3
expected_group_lens = {
root_runs[i].run_id: expected_len for i, expected_len in enumerate([2, 3, 3])
}
for root_run_id in run_groups:
assert len(run_groups[root_run_id]["runs"]) == expected_group_lens[root_run_id]
assert run_groups[root_run_id]["count"] == 6
def test_fetch_run_groups_filter(self, storage):
assert storage
root_runs = [
TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
for i in range(3)
]
runs = [run for run in root_runs]
for root_run in root_runs:
failed_run_id = make_new_run_id()
runs.append(
TestRunStorage.build_run(
run_id=failed_run_id,
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
status=PipelineRunStatus.FAILURE,
)
)
for _ in range(3):
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: failed_run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for run in runs:
storage.add_run(run)
run_groups = storage.get_run_groups(
limit=5, filters=PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE])
)
assert len(run_groups) == 3
for root_run_id in run_groups:
assert len(run_groups[root_run_id]["runs"]) == 2
assert run_groups[root_run_id]["count"] == 5
def test_fetch_run_groups_ordering(self, storage):
assert storage
first_root_run = TestRunStorage.build_run(
run_id=make_new_run_id(), pipeline_name="foo_pipeline"
)
storage.add_run(first_root_run)
second_root_run = TestRunStorage.build_run(
run_id=make_new_run_id(), pipeline_name="foo_pipeline"
)
storage.add_run(second_root_run)
second_root_run_child = TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={
PARENT_RUN_ID_TAG: second_root_run.run_id,
ROOT_RUN_ID_TAG: second_root_run.run_id,
},
)
storage.add_run(second_root_run_child)
first_root_run_child = TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={
PARENT_RUN_ID_TAG: first_root_run.run_id,
ROOT_RUN_ID_TAG: first_root_run.run_id,
},
)
storage.add_run(first_root_run_child)
run_groups = storage.get_run_groups(limit=1)
assert first_root_run.run_id in run_groups
assert second_root_run.run_id not in run_groups
def _skip_in_memory(self, storage):
from dagster.core.storage.runs import InMemoryRunStorage
if isinstance(storage, InMemoryRunStorage):
pytest.skip()
def test_empty_heartbeat(self, storage):
self._skip_in_memory(storage)
assert storage.get_daemon_heartbeats() == {}
def test_add_heartbeat(self, storage):
self._skip_in_memory(storage)
# test insert
added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(1000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(added_heartbeat)
assert len(storage.get_daemon_heartbeats()) == 1
stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()]
assert stored_heartbeat == added_heartbeat
# test update
second_added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(2000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(second_added_heartbeat)
assert len(storage.get_daemon_heartbeats()) == 1
stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()]
assert stored_heartbeat == second_added_heartbeat
def test_wipe_heartbeats(self, storage):
self._skip_in_memory(storage)
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(1000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(added_heartbeat)
storage.wipe_daemon_heartbeats()
def test_backfill(self, storage):
origin = self.fake_partition_set_origin("fake_partition_set")
backfills = storage.get_backfills()
assert len(backfills) == 0
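        # Add a single REQUESTED backfill over partitions a, b, c, then mark it COMPLETED.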
one = PartitionBackfill(
"one",
origin,
BulkActionStatus.REQUESTED,
["a", "b", "c"],
False,
None,
None,
pendulum.now().timestamp(),
)
storage.add_backfill(one)
assert len(storage.get_backfills()) == 1
assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 1
backfill = storage.get_backfill(one.backfill_id)
assert backfill == one
storage.update_backfill(one.with_status(status=BulkActionStatus.COMPLETED))
assert len(storage.get_backfills()) == 1
assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 0
def test_secondary_index(self, storage):
if not isinstance(storage, SqlRunStorage):
return
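        # SQL-backed storages should report every required data migration as already built.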
for name in REQUIRED_DATA_MIGRATIONS.keys():
assert storage.has_built_index(name)
def test_handle_run_event_pipeline_success_test(self, storage):
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
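        # A start event moves the run to STARTED; a success event for a different run id must not affect it.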
dagster_pipeline_start_event = DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_START.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
)
storage.handle_run_event(run_id, dagster_pipeline_start_event)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED
storage.handle_run_event(
make_new_run_id(), # diff run
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
),
)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED
storage.handle_run_event(
run_id, # correct run
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
),
)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
def test_debug_snapshot_import(self, storage):
from dagster.core.execution.api import create_execution_plan
from dagster.core.snap import (
snapshot_from_execution_plan,
create_execution_plan_snapshot_id,
)
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
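        # Mimic a debug import: snapshots are stored under explicitly overridden snapshot ids.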
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
new_pipeline_snapshot_id = f"{pipeline_snapshot_id}-new-snapshot"
storage.add_snapshot(pipeline_snapshot, snapshot_id=new_pipeline_snapshot_id)
assert not storage.has_snapshot(pipeline_snapshot_id)
assert storage.has_snapshot(new_pipeline_snapshot_id)
execution_plan = create_execution_plan(pipeline_def)
ep_snapshot = snapshot_from_execution_plan(execution_plan, new_pipeline_snapshot_id)
ep_snapshot_id = create_execution_plan_snapshot_id(ep_snapshot)
new_ep_snapshot_id = f"{ep_snapshot_id}-new-snapshot"
storage.add_snapshot(ep_snapshot, snapshot_id=new_ep_snapshot_id)
assert not storage.has_snapshot(ep_snapshot_id)
assert storage.has_snapshot(new_ep_snapshot_id)
def test_run_record_stats(self, storage):
assert storage
self._skip_in_memory(storage)
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is None
assert run_record.end_time is None
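        # PIPELINE_START should populate start_time; PIPELINE_SUCCESS should populate end_time.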
storage.handle_run_event(
run_id,
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_START.value,
pipeline_name="pipeline_name",
),
)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is not None
assert run_record.end_time is None
storage.handle_run_event(
run_id,
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
),
)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is not None
assert run_record.end_time is not None
assert run_record.end_time >= run_record.start_time
@pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36")
def test_by_job(self, storage):
def _add_run(job_name, tags=None):
return storage.add_run(
TestRunStorage.build_run(
pipeline_name=job_name, run_id=make_new_run_id(), tags=tags
)
)
_a_one = _add_run("a_pipeline", tags={"a": "A"})
a_two = _add_run("a_pipeline", tags={"a": "A"})
_b_one = _add_run("b_pipeline", tags={"a": "A"})
b_two = _add_run("b_pipeline", tags={"a": "A"})
c_one = _add_run("c_pipeline", tags={"a": "A"})
c_two = _add_run("c_pipeline", tags={"a": "B"})
runs_by_job = {
run.pipeline_name: run
for run in storage.get_runs(
bucket_by=JobBucket(
job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1
)
)
}
assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"}
assert runs_by_job.get("a_pipeline").run_id == a_two.run_id
assert runs_by_job.get("b_pipeline").run_id == b_two.run_id
assert runs_by_job.get("c_pipeline").run_id == c_two.run_id
# fetch with a runs filter applied
runs_by_job = {
run.pipeline_name: run
for run in storage.get_runs(
filters=PipelineRunsFilter(tags={"a": "A"}),
bucket_by=JobBucket(
job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1
),
)
}
assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"}
assert runs_by_job.get("a_pipeline").run_id == a_two.run_id
assert runs_by_job.get("b_pipeline").run_id == b_two.run_id
assert runs_by_job.get("c_pipeline").run_id == c_one.run_id
@pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36")
def test_by_tag(self, storage):
def _add_run(job_name, tags=None):
return storage.add_run(
TestRunStorage.build_run(
pipeline_name=job_name, run_id=make_new_run_id(), tags=tags
)
)
_one = _add_run("a", tags={"a": "1"})
_two = _add_run("a", tags={"a": "2"})
three = _add_run("a", tags={"a": "3"})
_none = _add_run("a")
b = _add_run("b", tags={"a": "4"})
one = _add_run("a", tags={"a": "1"})
two = _add_run("a", tags={"a": "2"})
runs_by_tag = {
run.tags.get("a"): run
for run in storage.get_runs(
bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1)
)
}
assert set(runs_by_tag.keys()) == {"1", "2", "3", "4"}
assert runs_by_tag.get("1").run_id == one.run_id
assert runs_by_tag.get("2").run_id == two.run_id
assert runs_by_tag.get("3").run_id == three.run_id
assert runs_by_tag.get("4").run_id == b.run_id
runs_by_tag = {
run.tags.get("a"): run
for run in storage.get_runs(
filters=PipelineRunsFilter(pipeline_name="a"),
bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1),
)
}
assert set(runs_by_tag.keys()) == {"1", "2", "3"}
assert runs_by_tag.get("1").run_id == one.run_id
assert runs_by_tag.get("2").run_id == two.run_id
assert runs_by_tag.get("3").run_id == three.run_id
def test_run_record_timestamps(self, storage):
assert storage
self._skip_in_memory(storage)
@op
def a():
pass
@job
def my_job():
a()
with tempfile.TemporaryDirectory() as temp_dir:
if storage._instance: # pylint: disable=protected-access
instance = storage._instance # pylint: disable=protected-access
else:
instance = DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=storage,
event_storage=InMemoryEventLogStorage(),
compute_log_manager=NoOpComputeLogManager(),
run_coordinator=DefaultRunCoordinator(),
run_launcher=SyncInMemoryRunLauncher(),
)
freeze_datetime = to_timezone(
create_pendulum_time(2019, 11, 2, 0, 0, 0, tz="US/Central"), "US/Pacific"
)
with pendulum.test(freeze_datetime):
result = my_job.execute_in_process(instance=instance)
records = instance.get_run_records(
filters=PipelineRunsFilter(run_ids=[result.run_id])
)
assert len(records) == 1
record = records[0]
assert record.start_time == freeze_datetime.timestamp()
assert record.end_time == freeze_datetime.timestamp()
| 36.209985 | 100 | 0.611278 | import sys
import tempfile
from datetime import datetime
import pendulum
import pytest
from dagster import job, op, seven
from dagster.core.definitions import PipelineDefinition
from dagster.core.errors import (
DagsterRunAlreadyExists,
DagsterRunNotFoundError,
DagsterSnapshotDoesNotExist,
)
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill
from dagster.core.host_representation import (
ExternalRepositoryOrigin,
ManagedGrpcPythonEnvRepositoryLocationOrigin,
)
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.snap import create_pipeline_snapshot_id
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.pipeline_run import (
DagsterRun,
JobBucket,
PipelineRunStatus,
PipelineRunsFilter,
TagBucket,
)
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs.migration import REQUIRED_DATA_MIGRATIONS
from dagster.core.storage.runs.sql_run_storage import SqlRunStorage
from dagster.core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.core.utils import make_new_run_id
from dagster.daemon.daemon import SensorDaemon
from dagster.daemon.types import DaemonHeartbeat
from dagster.serdes import serialize_pp
from dagster.seven.compat.pendulum import create_pendulum_time, to_timezone
win_py36 = seven.IS_WINDOWS and sys.version_info[0] == 3 and sys.version_info[1] == 6
class TestRunStorage:
__test__ = False
@pytest.fixture(name="storage", params=[])
def run_storage(self, request):
with request.param() as s:
yield s
def can_delete_runs(self):
return True
@staticmethod
def fake_repo_target():
return ExternalRepositoryOrigin(
ManagedGrpcPythonEnvRepositoryLocationOrigin(
LoadableTargetOrigin(
executable_path=sys.executable, module_name="fake", attribute="fake"
),
),
"fake_repo_name",
)
@classmethod
def fake_partition_set_origin(cls, partition_set_name):
return cls.fake_repo_target().get_partition_set_origin(partition_set_name)
@staticmethod
def build_run(
run_id,
pipeline_name,
mode="default",
tags=None,
status=PipelineRunStatus.NOT_STARTED,
parent_run_id=None,
root_run_id=None,
pipeline_snapshot_id=None,
):
return DagsterRun(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=None,
mode=mode,
tags=tags,
status=status,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot_id=pipeline_snapshot_id,
)
def test_basic_storage(self, storage):
assert storage
run_id = make_new_run_id()
added = storage.add_run(
TestRunStorage.build_run(
run_id=run_id, pipeline_name="some_pipeline", tags={"foo": "bar"}
)
)
assert added
runs = storage.get_runs()
assert len(runs) == 1
run = runs[0]
assert run.run_id == run_id
assert run.pipeline_name == "some_pipeline"
assert run.tags
assert run.tags.get("foo") == "bar"
assert storage.has_run(run_id)
fetched_run = storage.get_run_by_id(run_id)
assert fetched_run.run_id == run_id
assert fetched_run.pipeline_name == "some_pipeline"
def test_clear(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
assert storage
run_id = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 1
storage.wipe()
assert list(storage.get_runs()) == []
def test_storage_telemetry(self, storage):
assert storage
storage_id = storage.get_run_storage_id()
assert isinstance(storage_id, str)
storage_id_again = storage.get_run_storage_id()
assert storage_id == storage_id_again
def test_fetch_by_pipeline(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="some_pipeline"))
storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="some_other_pipeline"))
assert len(storage.get_runs()) == 2
some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
def test_fetch_by_snapshot_id(self, storage):
assert storage
pipeline_def_a = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_def_b = PipelineDefinition(name="some_other_pipeline", solid_defs=[])
pipeline_snapshot_a = pipeline_def_a.get_pipeline_snapshot()
pipeline_snapshot_b = pipeline_def_b.get_pipeline_snapshot()
pipeline_snapshot_a_id = create_pipeline_snapshot_id(pipeline_snapshot_a)
pipeline_snapshot_b_id = create_pipeline_snapshot_id(pipeline_snapshot_b)
assert storage.add_pipeline_snapshot(pipeline_snapshot_a) == pipeline_snapshot_a_id
assert storage.add_pipeline_snapshot(pipeline_snapshot_b) == pipeline_snapshot_b_id
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
pipeline_snapshot_id=pipeline_snapshot_a_id,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_other_pipeline",
pipeline_snapshot_id=pipeline_snapshot_b_id,
)
)
assert len(storage.get_runs()) == 2
runs_a = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_a_id))
assert len(runs_a) == 1
assert runs_a[0].run_id == one
runs_b = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_b_id))
assert len(runs_b) == 1
assert runs_b[0].run_id == two
def test_add_run_tags(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="foo"))
storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="bar"))
assert storage.get_run_tags() == []
storage.add_run_tags(one, {"tag1": "val1", "tag2": "val2"})
storage.add_run_tags(two, {"tag1": "val1"})
assert storage.get_run_tags() == [("tag1", {"val1"}), ("tag2", {"val2"})]
storage.add_run_tags(one, {"tag1": "val2", "tag3": "val3"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 3
assert test_run.tags["tag1"] == "val2"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert storage.get_run_tags() == [
("tag1", {"val1", "val2"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
]
storage.add_run_tags(one, {"tag1": "val3"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 3
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert storage.get_run_tags() == [
("tag1", {"val1", "val3"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
]
storage.add_run_tags(one, {"tag4": "val4"})
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 4
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert test_run.tags["tag4"] == "val4"
assert storage.get_run_tags() == [
("tag1", {"val1", "val3"}),
("tag2", {"val2"}),
("tag3", {"val3"}),
("tag4", {"val4"}),
]
test_run = storage.get_run_by_id(one)
assert len(test_run.tags) == 4
assert test_run.tags["tag1"] == "val3"
assert test_run.tags["tag2"] == "val2"
assert test_run.tags["tag3"] == "val3"
assert test_run.tags["tag4"] == "val4"
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag3": "val3"}))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
runs_with_old_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val1"}))
assert len(runs_with_old_tag) == 1
assert runs_with_old_tag[0].tags == {"tag1": "val1"}
runs_with_new_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val3"}))
assert len(runs_with_new_tag) == 1
assert runs_with_new_tag[0].tags == {
"tag1": "val3",
"tag2": "val2",
"tag3": "val3",
"tag4": "val4",
}
def test_fetch_by_filter(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"tag": "hello", "tag2": "world"},
status=PipelineRunStatus.SUCCESS,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"tag": "hello"},
status=PipelineRunStatus.FAILURE,
),
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="other_pipeline", status=PipelineRunStatus.SUCCESS
)
)
assert len(storage.get_runs()) == 3
some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one]))
count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one]))
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
count = storage.get_runs_count(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
count = storage.get_runs_count(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == three
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello"}))
count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello"}))
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"}))
count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"}))
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(
PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"})
)
count = storage.get_runs_count(
PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"})
)
assert len(some_runs) == 2
assert count == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(
PipelineRunsFilter(
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
count = storage.get_runs_count(
PipelineRunsFilter(
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
# All filters
some_runs = storage.get_runs(
PipelineRunsFilter(
run_ids=[one],
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
count = storage.get_runs_count(
PipelineRunsFilter(
run_ids=[one],
pipeline_name="some_pipeline",
tags={"tag": "hello"},
statuses=[PipelineRunStatus.SUCCESS],
)
)
assert len(some_runs) == 1
assert count == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter())
count = storage.get_runs_count(PipelineRunsFilter())
assert len(some_runs) == 3
assert count == 3
def test_fetch_count_by_tag(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"mytag": "hello", "mytag2": "world"},
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"mytag": "goodbye", "mytag2": "world"},
)
)
storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 3
run_count = storage.get_runs_count(
filters=PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"})
)
assert run_count == 1
run_count = storage.get_runs_count(filters=PipelineRunsFilter(tags={"mytag2": "world"}))
assert run_count == 2
run_count = storage.get_runs_count()
assert run_count == 3
assert storage.get_run_tags() == [("mytag", {"hello", "goodbye"}), ("mytag2", {"world"})]
def test_fetch_by_tags(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
tags={"mytag": "hello", "mytag2": "world"},
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
tags={"mytag": "goodbye", "mytag2": "world"},
)
)
storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 3
some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"}))
assert len(some_runs) == 1
assert some_runs[0].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag2": "world"}))
assert len(some_runs) == 2
assert some_runs[0].run_id == two
assert some_runs[1].run_id == one
some_runs = storage.get_runs(PipelineRunsFilter(tags={}))
assert len(some_runs) == 3
def test_paginated_fetch(self, storage):
assert storage
one, two, three = [make_new_run_id(), make_new_run_id(), make_new_run_id()]
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", tags={"mytag": "hello"}
)
)
all_runs = storage.get_runs()
assert len(all_runs) == 3
sliced_runs = storage.get_runs(cursor=three, limit=1)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
all_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline"))
assert len(all_runs) == 3
sliced_runs = storage.get_runs(
PipelineRunsFilter(pipeline_name="some_pipeline"), cursor=three, limit=1
)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
all_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello"}))
assert len(all_runs) == 3
sliced_runs = storage.get_runs(
PipelineRunsFilter(tags={"mytag": "hello"}), cursor=three, limit=1
)
assert len(sliced_runs) == 1
assert sliced_runs[0].run_id == two
def test_fetch_by_status(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
four = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE
)
)
assert {
run.run_id
for run in storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.NOT_STARTED])
)
} == {one}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]))
} == {
two,
three,
}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE]))
} == {four}
assert {
run.run_id
for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS]))
} == set()
def test_fetch_records_by_update_timestamp(self, storage):
assert storage
self._skip_in_memory(storage)
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.handle_run_event(
three, # three succeeds
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="some_pipeline",
),
)
storage.handle_run_event(
            one,  # fail one after two has failed and three has succeeded
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_FAILURE.value,
pipeline_name="some_pipeline",
),
)
record_two = storage.get_run_records(
filters=PipelineRunsFilter(run_ids=[two], updated_after=datetime(2020, 1, 1))
)[0]
run_two_update_timestamp = record_two.update_timestamp
assert [
record.pipeline_run.run_id
for record in storage.get_run_records(
filters=PipelineRunsFilter(updated_after=run_two_update_timestamp),
order_by="update_timestamp",
ascending=True,
)
] == [three, one]
assert [
record.pipeline_run.run_id
for record in storage.get_run_records(
filters=PipelineRunsFilter(
statuses=[PipelineRunStatus.FAILURE], updated_after=run_two_update_timestamp
),
)
] == [one]
def test_fetch_by_status_cursored(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
three = make_new_run_id()
four = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED
)
)
cursor_four_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four
)
assert len(cursor_four_runs) == 2
assert {run.run_id for run in cursor_four_runs} == {one, two}
cursor_two_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=two
)
assert len(cursor_two_runs) == 1
assert {run.run_id for run in cursor_two_runs} == {one}
cursor_one_runs = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=one
)
assert not cursor_one_runs
cursor_four_limit_one = storage.get_runs(
PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four, limit=1
)
assert len(cursor_four_limit_one) == 1
assert cursor_four_limit_one[0].run_id == two
def test_delete(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete runs")
assert storage
run_id = make_new_run_id()
storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline"))
assert len(storage.get_runs()) == 1
storage.delete_run(run_id)
assert list(storage.get_runs()) == []
def test_delete_with_tags(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete runs")
assert storage
run_id = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=run_id,
pipeline_name="some_pipeline",
tags={run_id: run_id},
)
)
assert len(storage.get_runs()) == 1
assert run_id in [key for key, value in storage.get_run_tags()]
storage.delete_run(run_id)
assert list(storage.get_runs()) == []
assert run_id not in [key for key, value in storage.get_run_tags()]
def test_wipe_tags(self, storage):
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
run_id = "some_run_id"
run = DagsterRun(run_id=run_id, pipeline_name="a_pipeline", tags={"foo": "bar"})
storage.add_run(run)
assert storage.get_run_by_id(run_id) == run
assert dict(storage.get_run_tags()) == {"foo": {"bar"}}
storage.wipe()
assert list(storage.get_runs()) == []
assert dict(storage.get_run_tags()) == {}
def test_write_conflicting_run_id(self, storage):
double_run_id = "double_run_id"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
run = DagsterRun(run_id=double_run_id, pipeline_name=pipeline_def.name)
assert storage.add_run(run)
with pytest.raises(DagsterRunAlreadyExists):
storage.add_run(run)
def test_add_get_snapshot(self, storage):
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id
fetched_pipeline_snapshot = storage.get_pipeline_snapshot(pipeline_snapshot_id)
assert fetched_pipeline_snapshot
assert serialize_pp(fetched_pipeline_snapshot) == serialize_pp(pipeline_snapshot)
assert storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert not storage.has_pipeline_snapshot("nope")
if self.can_delete_runs():
storage.wipe()
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
def test_single_write_read_with_snapshot(self, storage):
run_with_snapshot_id = "lkasjdflkjasdf"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
run_with_snapshot = DagsterRun(
run_id=run_with_snapshot_id,
pipeline_name=pipeline_def.name,
pipeline_snapshot_id=pipeline_snapshot_id,
)
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id
assert serialize_pp(storage.get_pipeline_snapshot(pipeline_snapshot_id)) == serialize_pp(
pipeline_snapshot
)
storage.add_run(run_with_snapshot)
assert storage.get_run_by_id(run_with_snapshot_id) == run_with_snapshot
if self.can_delete_runs():
storage.wipe()
assert not storage.has_pipeline_snapshot(pipeline_snapshot_id)
assert not storage.has_run(run_with_snapshot_id)
def test_single_write_with_missing_snapshot(self, storage):
run_with_snapshot_id = "lkasjdflkjasdf"
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
run_with_missing_snapshot = DagsterRun(
run_id=run_with_snapshot_id,
pipeline_name=pipeline_def.name,
pipeline_snapshot_id="nope",
)
with pytest.raises(DagsterSnapshotDoesNotExist):
storage.add_run(run_with_missing_snapshot)
def test_add_get_execution_snapshot(self, storage):
from dagster.core.execution.api import create_execution_plan
from dagster.core.snap import snapshot_from_execution_plan
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
execution_plan = create_execution_plan(pipeline_def)
ep_snapshot = snapshot_from_execution_plan(
execution_plan, pipeline_def.get_pipeline_snapshot_id()
)
snapshot_id = storage.add_execution_plan_snapshot(ep_snapshot)
fetched_ep_snapshot = storage.get_execution_plan_snapshot(snapshot_id)
assert fetched_ep_snapshot
assert serialize_pp(fetched_ep_snapshot) == serialize_pp(ep_snapshot)
assert storage.has_execution_plan_snapshot(snapshot_id)
assert not storage.has_execution_plan_snapshot("nope")
if self.can_delete_runs():
storage.wipe()
assert not storage.has_execution_plan_snapshot(snapshot_id)
def test_fetch_run_filter(self, storage):
assert storage
one = make_new_run_id()
two = make_new_run_id()
storage.add_run(
TestRunStorage.build_run(
run_id=one,
pipeline_name="some_pipeline",
status=PipelineRunStatus.SUCCESS,
)
)
storage.add_run(
TestRunStorage.build_run(
run_id=two,
pipeline_name="some_pipeline",
status=PipelineRunStatus.SUCCESS,
),
)
assert len(storage.get_runs()) == 2
some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one, two]))
count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one, two]))
assert len(some_runs) == 2
assert count == 2
def test_fetch_run_group(self, storage):
assert storage
root_run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
runs = [root_run]
# Create 3 children and 3 descendants of the rightmost child:
# root
# / | \
# [0] [1] [2]
# |
# [a]
# |
# [b]
# |
# [c]
for _ in range(3):
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
root_run_id=root_run.run_id,
parent_run_id=root_run.run_id,
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for _ in range(3):
# get root run id from the previous run if exists, otherwise use previous run's id
root_run_id = runs[-1].root_run_id if runs[-1].root_run_id else runs[-1].run_id
parent_run_id = runs[-1].run_id
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
root_run_id=root_run_id,
parent_run_id=parent_run_id,
tags={PARENT_RUN_ID_TAG: parent_run_id, ROOT_RUN_ID_TAG: root_run_id},
)
)
for run in runs:
storage.add_run(run)
run_group_one = storage.get_run_group(root_run.run_id)
assert len(run_group_one[1]) == 7
run_group_two = storage.get_run_group(runs[-1].run_id)
assert len(run_group_two[1]) == 7
assert run_group_one[0] == run_group_two[0]
assert run_group_one[1] == run_group_two[1]
def test_fetch_run_group_not_found(self, storage):
assert storage
run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
storage.add_run(run)
with pytest.raises(DagsterRunNotFoundError):
storage.get_run_group(make_new_run_id())
def test_fetch_run_groups(self, storage):
assert storage
root_runs = [
TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
for i in range(3)
]
runs = [run for run in root_runs]
for _ in range(5):
for root_run in root_runs:
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for run in runs:
storage.add_run(run)
run_groups = storage.get_run_groups(limit=5)
assert len(run_groups) == 3
expected_group_lens = {
root_runs[i].run_id: expected_len for i, expected_len in enumerate([2, 3, 3])
}
for root_run_id in run_groups:
assert len(run_groups[root_run_id]["runs"]) == expected_group_lens[root_run_id]
assert run_groups[root_run_id]["count"] == 6
def test_fetch_run_groups_filter(self, storage):
assert storage
root_runs = [
TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline")
for i in range(3)
]
runs = [run for run in root_runs]
for root_run in root_runs:
failed_run_id = make_new_run_id()
runs.append(
TestRunStorage.build_run(
run_id=failed_run_id,
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id},
status=PipelineRunStatus.FAILURE,
)
)
for _ in range(3):
runs.append(
TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={PARENT_RUN_ID_TAG: failed_run_id, ROOT_RUN_ID_TAG: root_run.run_id},
)
)
for run in runs:
storage.add_run(run)
run_groups = storage.get_run_groups(
limit=5, filters=PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE])
)
assert len(run_groups) == 3
for root_run_id in run_groups:
assert len(run_groups[root_run_id]["runs"]) == 2
assert run_groups[root_run_id]["count"] == 5
def test_fetch_run_groups_ordering(self, storage):
assert storage
first_root_run = TestRunStorage.build_run(
run_id=make_new_run_id(), pipeline_name="foo_pipeline"
)
storage.add_run(first_root_run)
second_root_run = TestRunStorage.build_run(
run_id=make_new_run_id(), pipeline_name="foo_pipeline"
)
storage.add_run(second_root_run)
second_root_run_child = TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={
PARENT_RUN_ID_TAG: second_root_run.run_id,
ROOT_RUN_ID_TAG: second_root_run.run_id,
},
)
storage.add_run(second_root_run_child)
first_root_run_child = TestRunStorage.build_run(
run_id=make_new_run_id(),
pipeline_name="foo_pipeline",
tags={
PARENT_RUN_ID_TAG: first_root_run.run_id,
ROOT_RUN_ID_TAG: first_root_run.run_id,
},
)
storage.add_run(first_root_run_child)
run_groups = storage.get_run_groups(limit=1)
assert first_root_run.run_id in run_groups
assert second_root_run.run_id not in run_groups
def _skip_in_memory(self, storage):
from dagster.core.storage.runs import InMemoryRunStorage
if isinstance(storage, InMemoryRunStorage):
pytest.skip()
def test_empty_heartbeat(self, storage):
self._skip_in_memory(storage)
assert storage.get_daemon_heartbeats() == {}
def test_add_heartbeat(self, storage):
self._skip_in_memory(storage)
added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(1000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(added_heartbeat)
assert len(storage.get_daemon_heartbeats()) == 1
stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()]
assert stored_heartbeat == added_heartbeat
second_added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(2000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(second_added_heartbeat)
assert len(storage.get_daemon_heartbeats()) == 1
stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()]
assert stored_heartbeat == second_added_heartbeat
def test_wipe_heartbeats(self, storage):
self._skip_in_memory(storage)
if not self.can_delete_runs():
pytest.skip("storage cannot delete")
added_heartbeat = DaemonHeartbeat(
timestamp=pendulum.from_timestamp(1000).float_timestamp,
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[],
)
storage.add_daemon_heartbeat(added_heartbeat)
storage.wipe_daemon_heartbeats()
def test_backfill(self, storage):
origin = self.fake_partition_set_origin("fake_partition_set")
backfills = storage.get_backfills()
assert len(backfills) == 0
one = PartitionBackfill(
"one",
origin,
BulkActionStatus.REQUESTED,
["a", "b", "c"],
False,
None,
None,
pendulum.now().timestamp(),
)
storage.add_backfill(one)
assert len(storage.get_backfills()) == 1
assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 1
backfill = storage.get_backfill(one.backfill_id)
assert backfill == one
storage.update_backfill(one.with_status(status=BulkActionStatus.COMPLETED))
assert len(storage.get_backfills()) == 1
assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 0
def test_secondary_index(self, storage):
if not isinstance(storage, SqlRunStorage):
return
for name in REQUIRED_DATA_MIGRATIONS.keys():
assert storage.has_built_index(name)
def test_handle_run_event_pipeline_success_test(self, storage):
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
dagster_pipeline_start_event = DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_START.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
)
storage.handle_run_event(run_id, dagster_pipeline_start_event)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED
storage.handle_run_event(
make_new_run_id(), DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
),
)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED
storage.handle_run_event(
run_id, DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
),
)
assert storage.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
def test_debug_snapshot_import(self, storage):
from dagster.core.execution.api import create_execution_plan
from dagster.core.snap import (
snapshot_from_execution_plan,
create_execution_plan_snapshot_id,
)
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[])
pipeline_snapshot = pipeline_def.get_pipeline_snapshot()
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
new_pipeline_snapshot_id = f"{pipeline_snapshot_id}-new-snapshot"
storage.add_snapshot(pipeline_snapshot, snapshot_id=new_pipeline_snapshot_id)
assert not storage.has_snapshot(pipeline_snapshot_id)
assert storage.has_snapshot(new_pipeline_snapshot_id)
execution_plan = create_execution_plan(pipeline_def)
ep_snapshot = snapshot_from_execution_plan(execution_plan, new_pipeline_snapshot_id)
ep_snapshot_id = create_execution_plan_snapshot_id(ep_snapshot)
new_ep_snapshot_id = f"{ep_snapshot_id}-new-snapshot"
storage.add_snapshot(ep_snapshot, snapshot_id=new_ep_snapshot_id)
assert not storage.has_snapshot(ep_snapshot_id)
assert storage.has_snapshot(new_ep_snapshot_id)
def test_run_record_stats(self, storage):
assert storage
self._skip_in_memory(storage)
run_id = make_new_run_id()
run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id)
storage.add_run(run_to_add)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is None
assert run_record.end_time is None
storage.handle_run_event(
run_id,
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_START.value,
pipeline_name="pipeline_name",
),
)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is not None
assert run_record.end_time is None
storage.handle_run_event(
run_id,
DagsterEvent(
message="a message",
event_type_value=DagsterEventType.PIPELINE_SUCCESS.value,
pipeline_name="pipeline_name",
),
)
run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0]
assert run_record.start_time is not None
assert run_record.end_time is not None
assert run_record.end_time >= run_record.start_time
@pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36")
def test_by_job(self, storage):
def _add_run(job_name, tags=None):
return storage.add_run(
TestRunStorage.build_run(
pipeline_name=job_name, run_id=make_new_run_id(), tags=tags
)
)
_a_one = _add_run("a_pipeline", tags={"a": "A"})
a_two = _add_run("a_pipeline", tags={"a": "A"})
_b_one = _add_run("b_pipeline", tags={"a": "A"})
b_two = _add_run("b_pipeline", tags={"a": "A"})
c_one = _add_run("c_pipeline", tags={"a": "A"})
c_two = _add_run("c_pipeline", tags={"a": "B"})
runs_by_job = {
run.pipeline_name: run
for run in storage.get_runs(
bucket_by=JobBucket(
job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1
)
)
}
assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"}
assert runs_by_job.get("a_pipeline").run_id == a_two.run_id
assert runs_by_job.get("b_pipeline").run_id == b_two.run_id
assert runs_by_job.get("c_pipeline").run_id == c_two.run_id
runs_by_job = {
run.pipeline_name: run
for run in storage.get_runs(
filters=PipelineRunsFilter(tags={"a": "A"}),
bucket_by=JobBucket(
job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1
),
)
}
assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"}
assert runs_by_job.get("a_pipeline").run_id == a_two.run_id
assert runs_by_job.get("b_pipeline").run_id == b_two.run_id
assert runs_by_job.get("c_pipeline").run_id == c_one.run_id
@pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36")
def test_by_tag(self, storage):
def _add_run(job_name, tags=None):
return storage.add_run(
TestRunStorage.build_run(
pipeline_name=job_name, run_id=make_new_run_id(), tags=tags
)
)
_one = _add_run("a", tags={"a": "1"})
_two = _add_run("a", tags={"a": "2"})
three = _add_run("a", tags={"a": "3"})
_none = _add_run("a")
b = _add_run("b", tags={"a": "4"})
one = _add_run("a", tags={"a": "1"})
two = _add_run("a", tags={"a": "2"})
runs_by_tag = {
run.tags.get("a"): run
for run in storage.get_runs(
bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1)
)
}
assert set(runs_by_tag.keys()) == {"1", "2", "3", "4"}
assert runs_by_tag.get("1").run_id == one.run_id
assert runs_by_tag.get("2").run_id == two.run_id
assert runs_by_tag.get("3").run_id == three.run_id
assert runs_by_tag.get("4").run_id == b.run_id
runs_by_tag = {
run.tags.get("a"): run
for run in storage.get_runs(
filters=PipelineRunsFilter(pipeline_name="a"),
bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1),
)
}
assert set(runs_by_tag.keys()) == {"1", "2", "3"}
assert runs_by_tag.get("1").run_id == one.run_id
assert runs_by_tag.get("2").run_id == two.run_id
assert runs_by_tag.get("3").run_id == three.run_id
def test_run_record_timestamps(self, storage):
assert storage
self._skip_in_memory(storage)
@op
def a():
pass
@job
def my_job():
a()
with tempfile.TemporaryDirectory() as temp_dir:
            if storage._instance:
                instance = storage._instance
            else:
instance = DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=storage,
event_storage=InMemoryEventLogStorage(),
compute_log_manager=NoOpComputeLogManager(),
run_coordinator=DefaultRunCoordinator(),
run_launcher=SyncInMemoryRunLauncher(),
)
freeze_datetime = to_timezone(
create_pendulum_time(2019, 11, 2, 0, 0, 0, tz="US/Central"), "US/Pacific"
)
with pendulum.test(freeze_datetime):
result = my_job.execute_in_process(instance=instance)
records = instance.get_run_records(
filters=PipelineRunsFilter(run_ids=[result.run_id])
)
assert len(records) == 1
record = records[0]
assert record.start_time == freeze_datetime.timestamp()
assert record.end_time == freeze_datetime.timestamp()
| true | true |
1c49f411229f1de6a15db374752a524ef0e0ee0b | 12,639 | py | Python | rnn/train_search.py | cclauss/darts | b6d4fe1692a67d81adaa3d4bfd7c13e3dcb1d443 | [
"Apache-2.0"
] | 1 | 2018-07-26T01:16:31.000Z | 2018-07-26T01:16:31.000Z | rnn/train_search.py | wangxinchina/darts | 77a461b62edb232406891028645b2331a24a8b4d | [
"Apache-2.0"
] | null | null | null | rnn/train_search.py | wangxinchina/darts | 77a461b62edb232406891028645b2331a24a8b4d | [
"Apache-2.0"
] | 1 | 2019-06-18T05:53:16.000Z | 2019-06-18T05:53:16.000Z | import argparse
import os, sys, glob
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from architect import Architect
import gc
import data
import model_search as model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=2,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='non-monotone interval (in epochs)')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
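# Batchify each corpus split: training uses args.batch_size, evaluation and test use smaller batch sizes.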
train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
architect = Architect(parallel_model, args)
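# The architect performs the architecture (alpha) updates on held-out search data inside train();
# the network weights are updated by the optimizer created further below.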
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args, evaluation=True)
targets = targets.view(-1)
log_prob, hidden = parallel_model(data, hidden)
loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
total_loss += loss * len(data)
hidden = repackage_hidden(hidden)
return total_loss[0] / len(data_source)
def train():
assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
# Turn on training mode which enables dropout.
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
batch, i = 0, 0
while i < train_data.size(0) - 1 - 1:
bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
# Prevent excessively small or negative sequence lengths
# seq_len = max(5, int(np.random.normal(bptt, 5)))
# # There's a very small chance that it could select a very long sequence length resulting in OOM
# seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
seq_len = int(bptt)
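        # Rescale the learning rate in proportion to the sampled sequence length (restored after the step).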
lr2 = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
model.train()
data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
data, targets = get_batch(train_data, i, args, seq_len=seq_len)
optimizer.zero_grad()
start, end, s_id = 0, args.small_batch_size, 0
while start < args.batch_size:
cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden[s_id] = repackage_hidden(hidden[s_id])
hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])
hidden_valid[s_id], grad_norm = architect.step(
hidden[s_id], cur_data, cur_targets,
hidden_valid[s_id], cur_data_valid, cur_targets_valid,
optimizer,
args.unrolled)
# assuming small_batch_size = batch_size so we don't accumulate gradients
optimizer.zero_grad()
hidden[s_id] = repackage_hidden(hidden[s_id])
log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
loss = raw_loss
            # Activation Regularization
if args.alpha > 0:
loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
# Temporal Activation Regularization (slowness)
loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
loss *= args.small_batch_size / args.batch_size
total_loss += raw_loss.data * args.small_batch_size / args.batch_size
loss.backward()
s_id += 1
start = end
end = start + args.small_batch_size
gc.collect()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optimizer.step()
# total_loss += raw_loss.data
optimizer.param_groups[0]['lr'] = lr2
if batch % args.log_interval == 0 and batch > 0:
logging.info(parallel_model.genotype())
print(F.softmax(parallel_model.weights, dim=-1))
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
batch += 1
i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
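# When resuming, restore the saved optimizer state: a 't0' entry in its param groups indicates ASGD was in use,
# otherwise plain SGD is reconstructed.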
if args.continue_train:
optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
if 't0' in optimizer_state['param_groups'][0]:
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer.load_state_dict(optimizer_state)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data, eval_batch_size)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
if val_loss < stored_loss:
save_checkpoint(model, optimizer, epoch, args.save)
logging.info('Saving Normal!')
stored_loss = val_loss
best_val_loss.append(val_loss)
| 44.038328 | 132 | 0.652742 | import argparse
import os, sys, glob
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from architect import Architect
import gc
import data
import model_search as model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=2,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='non-monotone interval')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
architect = Architect(parallel_model, args)
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args, evaluation=True)
targets = targets.view(-1)
log_prob, hidden = parallel_model(data, hidden)
loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
total_loss += loss * len(data)
hidden = repackage_hidden(hidden)
return total_loss[0] / len(data_source)
def train():
assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
batch, i = 0, 0
while i < train_data.size(0) - 1 - 1:
bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
# seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
seq_len = int(bptt)
lr2 = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
model.train()
data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
data, targets = get_batch(train_data, i, args, seq_len=seq_len)
optimizer.zero_grad()
start, end, s_id = 0, args.small_batch_size, 0
while start < args.batch_size:
cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden[s_id] = repackage_hidden(hidden[s_id])
hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])
hidden_valid[s_id], grad_norm = architect.step(
hidden[s_id], cur_data, cur_targets,
hidden_valid[s_id], cur_data_valid, cur_targets_valid,
optimizer,
args.unrolled)
optimizer.zero_grad()
hidden[s_id] = repackage_hidden(hidden[s_id])
log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
loss = raw_loss
            # Activation Regularization
if args.alpha > 0:
loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
# Temporal Activation Regularization (slowness)
loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
loss *= args.small_batch_size / args.batch_size
total_loss += raw_loss.data * args.small_batch_size / args.batch_size
loss.backward()
s_id += 1
start = end
end = start + args.small_batch_size
gc.collect()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optimizer.step()
# total_loss += raw_loss.data
optimizer.param_groups[0]['lr'] = lr2
if batch % args.log_interval == 0 and batch > 0:
logging.info(parallel_model.genotype())
print(F.softmax(parallel_model.weights, dim=-1))
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
batch += 1
i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
if args.continue_train:
optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
if 't0' in optimizer_state['param_groups'][0]:
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer.load_state_dict(optimizer_state)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data, eval_batch_size)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
if val_loss < stored_loss:
save_checkpoint(model, optimizer, epoch, args.save)
logging.info('Saving Normal!')
stored_loss = val_loss
best_val_loss.append(val_loss)
| true | true |
1c49f5c305ab49c62a991b94780ce7e3479571cc | 4,046 | py | Python | codejobs/settings.py | amanfojnr/open-jobs-api | e70aa2c0d5031981cd571c50753fa5b28f5dce07 | [
"MIT"
] | null | null | null | codejobs/settings.py | amanfojnr/open-jobs-api | e70aa2c0d5031981cd571c50753fa5b28f5dce07 | [
"MIT"
] | null | null | null | codejobs/settings.py | amanfojnr/open-jobs-api | e70aa2c0d5031981cd571c50753fa5b28f5dce07 | [
"MIT"
] | null | null | null | """
Django settings for codejobs project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b(=fum!6g93&xfvwmd^8#bz-2t8nqxbuum!9_ke!t$d&f@hztp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'codejobs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'codejobs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# django-allauth config
ACCOUNT_EMAIL_VERIFICATION = None
# heroku django settings
try:
import local_settings
except ImportError:
import dj_database_url
...
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com']
...
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'codelabs',
'USER': 'amanfojnr',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
...
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
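    # dj_database_url.config() builds this dict from the DATABASE_URL
    # environment variable that Heroku sets for the attached database,
    # e.g. (illustrative value only):
    #   DATABASE_URL=postgres://user:password@host:5432/dbname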
| 24.227545 | 91 | 0.674246 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'b(=fum!6g93&xfvwmd^8#bz-2t8nqxbuum!9_ke!t$d&f@hztp'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'codejobs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'codejobs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# django-allauth config
ACCOUNT_EMAIL_VERIFICATION = None
# heroku django settings
try:
import local_settings
except ImportError:
import dj_database_url
...
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com']
...
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'codelabs',
'USER': 'amanfojnr',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
...
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
| true | true |
1c49f7108d5c141fdd81026bb117e58efba5174b | 1,394 | py | Python | AutomationFramework/tests/network_instance/test_ni_protocol_instances.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 1 | 2020-04-23T15:22:16.000Z | 2020-04-23T15:22:16.000Z | AutomationFramework/tests/network_instance/test_ni_protocol_instances.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 44 | 2020-08-13T19:35:41.000Z | 2021-03-01T09:08:00.000Z | AutomationFramework/tests/network_instance/test_ni_protocol_instances.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 6 | 2020-04-23T15:29:38.000Z | 2022-03-03T14:23:38.000Z | import pytest
from AutomationFramework.page_objects.network_instance.network_intance import NetworkInstance
from AutomationFramework.tests.base_test import BaseTest
class TestNetworkInstanceProtocolInstances(BaseTest):
test_case_file = 'ni_protocol_instances.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'ni_protocol_instances_creation',
'page_object_class': NetworkInstance}])
def test_ni_protocol_instances_creation(self, create_page_object):
create_page_object.execute_network_instance_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'ni_protocol_instances_enabled',
'page_object_class': NetworkInstance}])
def test_ni_protocol_instances_enabled(self, create_page_object):
create_page_object.execute_network_instance_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
| 63.363636 | 117 | 0.700861 | import pytest
from AutomationFramework.page_objects.network_instance.network_intance import NetworkInstance
from AutomationFramework.tests.base_test import BaseTest
class TestNetworkInstanceProtocolInstances(BaseTest):
test_case_file = 'ni_protocol_instances.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'ni_protocol_instances_creation',
'page_object_class': NetworkInstance}])
def test_ni_protocol_instances_creation(self, create_page_object):
create_page_object.execute_network_instance_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'ni_protocol_instances_enabled',
'page_object_class': NetworkInstance}])
def test_ni_protocol_instances_enabled(self, create_page_object):
create_page_object.execute_network_instance_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
| true | true |
1c49f76aea09c7aaa661dfbbe91e896bbb4e690a | 844 | py | Python | Python/AWS_Scripts/moveFile.py | CharvyJain/Rotten-Scripts | c9b8f7dde378620e4a82eae7aacec53f1eeea3c5 | [
"MIT"
] | 3 | 2021-02-06T16:16:46.000Z | 2021-08-20T03:19:01.000Z | Python/Aws/moveFile.py | SKAUL05/Rotten-Scripts | c44e69754bbecb8a547fe2cc3a29be5acf97c46a | [
"MIT"
] | null | null | null | Python/Aws/moveFile.py | SKAUL05/Rotten-Scripts | c44e69754bbecb8a547fe2cc3a29be5acf97c46a | [
"MIT"
] | 1 | 2021-08-08T16:03:40.000Z | 2021-08-08T16:03:40.000Z | import boto3
awsAccessKeyId = ""
awsSecretAccessKey = ""
bucketName = ""
directoryName = ""
destinationDirectory = ""
s3 = boto3.resource(
's3',
aws_access_key_id=awsAccessKeyId,
aws_secret_access_key=awsSecretAccessKey
)
myBucket = s3.Bucket(bucketName)
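# Note: S3 has no native "move" operation, so moveFile() copies each object
# under the directoryName prefix to the destinationDirectory prefix and then
# deletes the original key.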
def moveFile():
try:
for objectSummary in myBucket.objects.filter(Prefix=directoryName):
s3FilePath = objectSummary.key
sourceFilename = (s3FilePath).split("/")[-1]
copySource = {"Bucket": bucketName, "Key": s3FilePath}
targetFilename = f"{destinationDirectory}/{sourceFilename}"
s3.meta.client.copy(copySource, bucketName, targetFilename)
s3.Object(bucketName, s3FilePath).delete()
except Exception as err:
print(err)
if __name__ == '__main__':
moveFile()
| 30.142857 | 79 | 0.631517 | import boto3
awsAccessKeyId = ""
awsSecretAccessKey = ""
bucketName = ""
directoryName = ""
destinationDirectory = ""
s3 = boto3.resource(
's3',
aws_access_key_id=awsAccessKeyId,
aws_secret_access_key=awsSecretAccessKey
)
myBucket = s3.Bucket(bucketName)
def moveFile():
try:
for objectSummary in myBucket.objects.filter(Prefix=directoryName):
s3FilePath = objectSummary.key
sourceFilename = (s3FilePath).split("/")[-1]
copySource = {"Bucket": bucketName, "Key": s3FilePath}
targetFilename = f"{destinationDirectory}/{sourceFilename}"
s3.meta.client.copy(copySource, bucketName, targetFilename)
s3.Object(bucketName, s3FilePath).delete()
except Exception as err:
print(err)
if __name__ == '__main__':
moveFile()
| true | true |
1c49f7929e2a520d2bfeebebc3d0b9896156a77e | 2,768 | py | Python | starfish/image/_filter/scale_by_percentile.py | vipulsinghal02/starfish | c3d347954ad40a7a4be9a50d89974f5fbbc2919d | [
"MIT"
] | null | null | null | starfish/image/_filter/scale_by_percentile.py | vipulsinghal02/starfish | c3d347954ad40a7a4be9a50d89974f5fbbc2919d | [
"MIT"
] | null | null | null | starfish/image/_filter/scale_by_percentile.py | vipulsinghal02/starfish | c3d347954ad40a7a4be9a50d89974f5fbbc2919d | [
"MIT"
] | null | null | null | from functools import partial
from typing import Optional
import numpy as np
from starfish.imagestack.imagestack import ImageStack
from ._base import FilterAlgorithmBase
from .util import preserve_float_range
class ScaleByPercentile(FilterAlgorithmBase):
def __init__(self, p: int=0, is_volume: bool=False, **kwargs) -> None:
"""Image scaling filter
Parameters
----------
p : int
each image in the stack is scaled by this percentile. must be in [0, 100]
is_volume : bool
If True, 3d (z, y, x) volumes will be filtered. By default, filter 2-d (y, x) tiles
        kwargs
            Additional keyword arguments (not used by this filter)
"""
self.p = p
self.is_volume = is_volume
_DEFAULT_TESTING_PARAMETERS = {"p": 0}
@classmethod
def _add_arguments(cls, group_parser) -> None:
group_parser.add_argument(
"--p", default=100, type=int, help="scale images by this percentile")
@staticmethod
def _scale(image: np.ndarray, p: int) -> np.ndarray:
"""Clip values of img below and above percentiles p_min and p_max
Parameters
----------
image : np.ndarray
image to be scaled
p : int
each image in the stack is scaled by this percentile. must be in [0, 100]
Notes
-----
- Setting p to 100 scales the image by it's maximum value
- No shifting or transformation to adjust dynamic range is done after scaling
Returns
-------
np.ndarray :
Numpy array of same shape as img
"""
v = np.percentile(image, p)
image = image / v
image = preserve_float_range(image)
return image
def run(
self, stack: ImageStack, in_place: bool=False, verbose: bool=False,
n_processes: Optional[int]=None
) -> ImageStack:
"""Perform filtering of an image stack
Parameters
----------
stack : ImageStack
Stack to be filtered.
in_place : bool
if True, process ImageStack in-place, otherwise return a new stack
verbose : bool
If True, report on the percentage completed (default = False) during processing
n_processes : Optional[int]
Number of parallel processes to devote to calculating the filter
Returns
-------
ImageStack :
If in-place is False, return the results of filter as a new stack. Otherwise return the
original stack.
"""
clip = partial(self._scale, p=self.p)
result = stack.apply(
clip,
is_volume=self.is_volume, verbose=verbose, in_place=in_place, n_processes=n_processes
)
return result
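# A minimal usage sketch (assuming `stack` is an already-constructed ImageStack;
# the variable names are illustrative only):
#
#   filt = ScaleByPercentile(p=90)
#   scaled = filt.run(stack, in_place=False)  # returns a new, scaled ImageStack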
| 29.136842 | 100 | 0.595737 | from functools import partial
from typing import Optional
import numpy as np
from starfish.imagestack.imagestack import ImageStack
from ._base import FilterAlgorithmBase
from .util import preserve_float_range
class ScaleByPercentile(FilterAlgorithmBase):
def __init__(self, p: int=0, is_volume: bool=False, **kwargs) -> None:
self.p = p
self.is_volume = is_volume
_DEFAULT_TESTING_PARAMETERS = {"p": 0}
@classmethod
def _add_arguments(cls, group_parser) -> None:
group_parser.add_argument(
"--p", default=100, type=int, help="scale images by this percentile")
@staticmethod
def _scale(image: np.ndarray, p: int) -> np.ndarray:
v = np.percentile(image, p)
image = image / v
image = preserve_float_range(image)
return image
def run(
self, stack: ImageStack, in_place: bool=False, verbose: bool=False,
n_processes: Optional[int]=None
) -> ImageStack:
clip = partial(self._scale, p=self.p)
result = stack.apply(
clip,
is_volume=self.is_volume, verbose=verbose, in_place=in_place, n_processes=n_processes
)
return result
| true | true |
1c49f7f555a1957609cf19ef4517fb9da15f2e1a | 688 | py | Python | mogan/image/__init__.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/image/__init__.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | mogan/image/__init__.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def API():
# Needed to prevent circular import...
import mogan.image.api
return mogan.image.api.API()
| 38.222222 | 78 | 0.715116 |
def API():
import mogan.image.api
return mogan.image.api.API()
| true | true |
1c49fa29619138af962a51bbcd48f42a98d3cb02 | 1,274 | py | Python | perfkitbenchmarker/linux_packages/nfs_server.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | 2 | 2021-01-15T09:40:28.000Z | 2021-01-15T09:40:36.000Z | perfkitbenchmarker/linux_packages/nfs_server.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:07:44.000Z | 2021-02-23T12:07:44.000Z | perfkitbenchmarker/linux_packages/nfs_server.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing installation for NFSv4 Server.
Network File System (NFS) is a distributed file system protocol that allows a
user on a client computer to access files over a computer network much like
local storage is accessed.
This is mainly used for scientific-computing distributed workloads that
require file copying between master and worker nodes. The server package can
be used on the master node.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
PACKAGE_NAME = 'nfs_server'
def YumInstall(vm):
vm.InstallPackages('nfs-utils')
def AptInstall(vm):
vm.InstallPackages('nfs-kernel-server')
| 32.666667 | 79 | 0.787284 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
PACKAGE_NAME = 'nfs_server'
def YumInstall(vm):
vm.InstallPackages('nfs-utils')
def AptInstall(vm):
vm.InstallPackages('nfs-kernel-server')
| true | true |
1c49fa7a717d7e4d70535ca92cd54ab0fc3c3e50 | 19,387 | py | Python | speech/utils/textgrid.py | dzubke/speech-lite | 65f83ac2b7551650820f079ce5152741f2a6fdb8 | [
"Apache-2.0"
] | null | null | null | speech/utils/textgrid.py | dzubke/speech-lite | 65f83ac2b7551650820f079ce5152741f2a6fdb8 | [
"Apache-2.0"
] | null | null | null | speech/utils/textgrid.py | dzubke/speech-lite | 65f83ac2b7551650820f079ce5152741f2a6fdb8 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: TextGrid analysis
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Margaret Mitchell <[email protected]>
# Steven Bird <[email protected]> (revisions)
# URL: <http://www.nltk.org>
# For license information, see LICENSE.TXT
#
"""
Tools for reading TextGrid files, the format used by Praat.
Module contents
===============
The textgrid corpus reader provides 4 data items and 1 function
for each textgrid file. For each tier in the file, the reader
provides 10 data items and 2 functions.
For the full textgrid file:
- size
The number of tiers in the file.
- xmin
First marked time of the file.
- xmax
Last marked time of the file.
- t_time
xmax - xmin.
- text_type
The style of TextGrid format:
- ooTextFile: Organized by tier.
- ChronTextFile: Organized by time.
- OldooTextFile: Similar to ooTextFile.
- to_chron()
Convert given file to a ChronTextFile format.
- to_oo()
Convert given file to an ooTextFile format.
For each tier:
- text_type
The style of TextGrid format, as above.
- classid
The style of transcription on this tier:
- IntervalTier: Transcription is marked as intervals.
- TextTier: Transcription is marked as single points.
- nameid
The name of the tier.
- xmin
First marked time of the tier.
- xmax
Last marked time of the tier.
- size
Number of entries in the tier.
- transcript
The raw transcript for the tier.
- simple_transcript
The transcript formatted as a list of tuples: (time1, time2, utterance).
- tier_info
List of (classid, nameid, xmin, xmax, size, transcript).
- min_max()
A tuple of (xmin, xmax).
- time(non_speech_marker)
Returns the utterance time of a given tier.
Excludes entries that begin with a non-speech marker.
"""
# needs more cleanup, subclassing, epydoc docstrings
import sys
import re
TEXTTIER = "TextTier"
INTERVALTIER = "IntervalTier"
OOTEXTFILE = re.compile(r"""(?x)
xmin\ =\ (.*)[\r\n]+
xmax\ =\ (.*)[\r\n]+
[\s\S]+?size\ =\ (.*)[\r\n]+
""")
CHRONTEXTFILE = re.compile(r"""(?x)
[\r\n]+(\S+)\
(\S+)\ +!\ Time\ domain.\ *[\r\n]+
(\S+)\ +!\ Number\ of\ tiers.\ *[\r\n]+"
""")
OLDOOTEXTFILE = re.compile(r"""(?x)
[\r\n]+(\S+)
[\r\n]+(\S+)
[\r\n]+.+[\r\n]+(\S+)
""")
#################################################################
# TextGrid Class
#################################################################
class TextGrid(object):
"""
Class to manipulate the TextGrid format used by Praat.
Separates each tier within this file into its own Tier
object. Each TextGrid object has
a number of tiers (size), xmin, xmax, a text type to help
with the different styles of TextGrid format, and tiers with their
own attributes.
"""
def __init__(self, read_file, config:dict=None):
"""
Takes open read file as input, initializes attributes
of the TextGrid file.
@type read_file: An open TextGrid file, mode "r".
@arg config (dict): dict configuration file to create a TextGrid object
@param size: Number of tiers.
@param xmin: xmin.
@param xmax: xmax.
@param t_time: Total time of TextGrid file.
@param text_type: TextGrid format.
@type tiers: A list of tier objects.
"""
# default creation of textgrid object
if config is None:
self.read_file = read_file
self.size = 0
self.xmin = 0
self.xmax = 0
self.t_time = 0
self.text_type = self._check_type()
self.tiers = self._find_tiers()
# creating a textgrid from a dict config
else:
self.read_file = None
self.size = config['size']
self.xmin = config['xmin']
self.xmax = config['xmax']
self.t_time = config['t_time']
def __iter__(self):
for tier in self.tiers:
yield tier
def next(self):
if self.idx == (self.size - 1):
raise StopIteration
self.idx += 1
return self.tiers[self.idx]
@staticmethod
def load(file):
"""
@param file: a file in TextGrid format
"""
return TextGrid(open(file).read())
def _load_tiers(self, header):
"""
Iterates over each tier and grabs tier information.
"""
tiers = []
if self.text_type == "ChronTextFile":
m = re.compile(header)
tier_headers = m.findall(self.read_file)
tier_re = " \d+.?\d* \d+.?\d*[\r\n]+\"[^\"]*\""
for i in range(0, self.size):
tier_info = [tier_headers[i]] + \
re.findall(str(i + 1) + tier_re, self.read_file)
tier_info = "\n".join(tier_info)
tiers.append(Tier(tier_info, self.text_type, self.t_time))
return tiers
tier_re = header + "[\s\S]+?(?=" + header + "|$$)"
m = re.compile(tier_re)
tier_iter = m.finditer(self.read_file)
for iterator in tier_iter:
(begin, end) = iterator.span()
tier_info = self.read_file[begin:end]
tiers.append(Tier(tier_info, self.text_type, self.t_time))
return tiers
def _check_type(self):
"""
Figures out the TextGrid format.
"""
m = re.match("(.*)[\r\n](.*)[\r\n](.*)[\r\n](.*)", self.read_file)
try:
type_id = m.group(1).strip()
except AttributeError:
raise TypeError("Cannot read file -- try TextGrid.load()")
xmin = m.group(4)
if type_id == "File type = \"ooTextFile\"":
if "xmin" not in xmin:
text_type = "OldooTextFile"
else:
text_type = "ooTextFile"
elif type_id == "\"Praat chronological TextGrid text file\"":
text_type = "ChronTextFile"
else:
raise TypeError("Unknown format '(%s)'", (type_id))
return text_type
def _find_tiers(self):
"""
Splits the textgrid file into substrings corresponding to tiers.
"""
if self.text_type == "ooTextFile":
m = OOTEXTFILE
header = "\\t+item \["
elif self.text_type == "ChronTextFile":
m = CHRONTEXTFILE
header = "\"\S+\" \".*\" \d+\.?\d* \d+\.?\d*"
elif self.text_type == "OldooTextFile":
m = OLDOOTEXTFILE
header = "\".*\"[\r\n]+\".*\""
file_info = m.findall(self.read_file)[0]
self.xmin = float(file_info[0])
self.xmax = float(file_info[1])
self.t_time = self.xmax - self.xmin
self.size = int(file_info[2])
tiers = self._load_tiers(header)
return tiers
def to_chron(self):
"""
@return: String in Chronological TextGrid file format.
"""
chron_file = ""
chron_file += "\"Praat chronological TextGrid text file\"\n"
chron_file += str(self.xmin) + " " + str(self.xmax)
chron_file += " ! Time domain.\n"
chron_file += str(self.size) + " ! Number of tiers.\n"
for tier in self.tiers:
idx = (self.tiers.index(tier)) + 1
tier_header = "\"" + tier.classid + "\" \"" \
+ tier.nameid + "\" " + str(tier.xmin) \
+ " " + str(tier.xmax)
chron_file += tier_header + "\n"
transcript = tier.simple_transcript
for (xmin, xmax, utt) in transcript:
chron_file += str(idx) + " " + str(xmin)
chron_file += " " + str(xmax) +"\n"
chron_file += "\"" + utt + "\"\n"
return chron_file
def to_oo(self):
"""
@return: A string in OoTextGrid file format.
"""
oo_file = ""
oo_file += "File type = \"ooTextFile\"\n"
oo_file += "Object class = \"TextGrid\"\n\n"
oo_file += "xmin = ", self.xmin, "\n"
oo_file += "xmax = ", self.xmax, "\n"
oo_file += "tiers? <exists>\n"
oo_file += "size = ", self.size, "\n"
oo_file += "item []:\n"
for i in range(len(self.tiers)):
oo_file += "%4s%s [%s]" % ("", "item", i + 1)
_curr_tier = self.tiers[i]
for (x, y) in _curr_tier.header:
oo_file += "%8s%s = \"%s\"" % ("", x, y)
if _curr_tier.classid != TEXTTIER:
for (xmin, xmax, text) in _curr_tier.simple_transcript:
oo_file += "%12s%s = %s" % ("", "xmin", xmin)
oo_file += "%12s%s = %s" % ("", "xmax", xmax)
oo_file += "%12s%s = \"%s\"" % ("", "text", text)
else:
for (time, mark) in _curr_tier.simple_transcript:
oo_file += "%12s%s = %s" % ("", "time", time)
oo_file += "%12s%s = %s" % ("", "mark", mark)
return oo_file
#################################################################
# Tier Class
#################################################################
class Tier(object):
"""
A container for each tier.
"""
def __init__(self, tier, text_type, t_time, config=None):
"""
Initializes attributes of the tier: class, name, xmin, xmax
size, transcript, total time.
Utilizes text_type to guide how to parse the file.
@type tier: a tier object; single item in the TextGrid list.
@param text_type: TextGrid format
@param t_time: Total time of TextGrid file.
@param classid: Type of tier (point or interval).
@param nameid: Name of tier.
@param xmin: xmin of the tier.
@param xmax: xmax of the tier.
@param size: Number of entries in the tier
@param transcript: The raw transcript for the tier.
@arg config (dict): a dictionary configuration of a tier object
"""
if config is None:
self.tier = tier
self.text_type = text_type
self.t_time = t_time
self.classid = ""
self.nameid = ""
self.xmin = 0
self.xmax = 0
self.size = 0
self.transcript = ""
self.tier_info = ""
self._make_info()
self.simple_transcript = self.make_simple_transcript()
if self.classid != TEXTTIER:
self.mark_type = "intervals"
else:
self.mark_type = "points"
self.header = [("class", self.classid), ("name", self.nameid), \
("xmin", self.xmin), ("xmax", self.xmax), ("size", self.size)]
else:
pass
def __iter__(self):
return self
def _make_info(self):
"""
Figures out most attributes of the tier object:
class, name, xmin, xmax, transcript.
"""
trans = "([\S\s]*)"
if self.text_type == "ChronTextFile":
classid = "\"(.*)\" +"
nameid = "\"(.*)\" +"
xmin = "(\d+\.?\d*) +"
xmax = "(\d+\.?\d*) *[\r\n]+"
# No size values are given in the Chronological Text File format.
self.size = None
size = ""
elif self.text_type == "ooTextFile":
classid = "\t+class = \"(.*)\" *[\r\n]+"
nameid = "\t+name = \"(.*)\" *[\r\n]+"
xmin = "\t+xmin = (\d+\.?\d*) *[\r\n]+"
xmax = "\t+xmax = (\d+\.?\d*) *[\r\n]+"
size = "\t+\S+: size = (\d+) *[\r\n]+"
elif self.text_type == "OldooTextFile":
classid = "\"(.*)\" *[\r\n]+"
nameid = "\"(.*)\" *[\r\n]+"
xmin = "(\d+\.?\d*) *[\r\n]+"
xmax = "(\d+\.?\d*) *[\r\n]+"
size = "(\d+) *[\r\n]+"
m = re.compile(classid + nameid + xmin + xmax + size + trans)
self.tier_info = m.findall(self.tier)[0]
self.classid = self.tier_info[0]
self.nameid = self.tier_info[1]
self.xmin = float(self.tier_info[2])
self.xmax = float(self.tier_info[3])
if self.size != None:
self.size = int(self.tier_info[4])
self.transcript = self.tier_info[-1]
def make_simple_transcript(self):
"""
@return: Transcript of the tier, in form [(start_time end_time label)]
"""
if self.text_type == "ChronTextFile":
trans_head = ""
trans_xmin = " (\S+)"
trans_xmax = " (\S+)[\r\n]+"
trans_text = "\"([\S\s]*?)\""
elif self.text_type == "ooTextFile":
trans_head = "\\t+\S+ \[\d+\]: *[\r\n]+"
trans_xmin = "\\t+\S+ = (\S+) *[\r\n]+"
trans_xmax = "\\t+\S+ = (\S+) *[\r\n]+"
trans_text = "\\t+\S+ = \"([^\"]*?)\""
elif self.text_type == "OldooTextFile":
trans_head = ""
trans_xmin = "(.*)[\r\n]+"
trans_xmax = "(.*)[\r\n]+"
trans_text = "\"([\S\s]*?)\""
if self.classid == TEXTTIER:
trans_xmin = ""
trans_m = re.compile(trans_head + trans_xmin + trans_xmax + trans_text)
self.simple_transcript = trans_m.findall(self.transcript)
return self.simple_transcript
def transcript(self):
"""
@return: Transcript of the tier, as it appears in the file.
"""
return self.transcript
def time(self, non_speech_char="."):
"""
@return: Utterance time of a given tier.
Screens out entries that begin with a non-speech marker.
"""
total = 0.0
if self.classid != TEXTTIER:
for (time1, time2, utt) in self.simple_transcript:
utt = utt.strip()
if utt and not utt[0] == ".":
total += (float(time2) - float(time1))
return total
def tier_name(self):
"""
@return: Tier name of a given tier.
"""
return self.nameid
def classid(self):
"""
@return: Type of transcription on tier.
"""
return self.classid
def min_max(self):
"""
@return: (xmin, xmax) tuple for a given tier.
"""
return (self.xmin, self.xmax)
def __repr__(self):
return "<%s \"%s\" (%.2f, %.2f) %.2f%%>" % (self.classid, self.nameid, self.xmin, self.xmax, 100*self.time()/self.t_time)
def __str__(self):
return self.__repr__() + "\n " + "\n ".join(" ".join(row) for row in self.simple_transcript)
def demo_TextGrid(demo_data):
print("** Demo of the TextGrid class. **")
fid = TextGrid(demo_data)
print("Tiers: %s" % (fid.size))
for i, tier in enumerate(fid):
print("\n***")
print("Tier: %s" % (i + 1))
print(tier)
def demo():
# Each demo demonstrates different TextGrid formats.
print("Format 1")
demo_TextGrid(demo_data1)
print("\nFormat 2")
demo_TextGrid(demo_data2)
print("\nFormat 3")
demo_TextGrid(demo_data3)
demo_data1 = """File type = "ooTextFile"
Object class = "TextGrid"
xmin = 0
xmax = 2045.144149659864
tiers? <exists>
size = 3
item []:
item [1]:
class = "IntervalTier"
name = "utterances"
xmin = 0
xmax = 2045.144149659864
intervals: size = 5
intervals [1]:
xmin = 0
xmax = 2041.4217474125382
text = ""
intervals [2]:
xmin = 2041.4217474125382
xmax = 2041.968276643991
text = "this"
intervals [3]:
xmin = 2041.968276643991
xmax = 2042.5281632653062
text = "is"
intervals [4]:
xmin = 2042.5281632653062
xmax = 2044.0487352585324
text = "a"
intervals [5]:
xmin = 2044.0487352585324
xmax = 2045.144149659864
text = "demo"
item [2]:
class = "TextTier"
name = "notes"
xmin = 0
xmax = 2045.144149659864
points: size = 3
points [1]:
time = 2041.4217474125382
mark = ".begin_demo"
points [2]:
time = 2043.8338291031832
mark = "voice gets quiet here"
points [3]:
time = 2045.144149659864
mark = ".end_demo"
item [3]:
class = "IntervalTier"
name = "phones"
xmin = 0
xmax = 2045.144149659864
intervals: size = 12
intervals [1]:
xmin = 0
xmax = 2041.4217474125382
text = ""
intervals [2]:
xmin = 2041.4217474125382
xmax = 2041.5438290324326
text = "D"
intervals [3]:
xmin = 2041.5438290324326
xmax = 2041.7321032910372
text = "I"
intervals [4]:
xmin = 2041.7321032910372
xmax = 2041.968276643991
text = "s"
intervals [5]:
xmin = 2041.968276643991
xmax = 2042.232189031843
text = "I"
intervals [6]:
xmin = 2042.232189031843
xmax = 2042.5281632653062
text = "z"
intervals [7]:
xmin = 2042.5281632653062
xmax = 2044.0487352585324
text = "eI"
intervals [8]:
xmin = 2044.0487352585324
xmax = 2044.2487352585324
text = "dc"
intervals [9]:
xmin = 2044.2487352585324
xmax = 2044.3102321849011
text = "d"
intervals [10]:
xmin = 2044.3102321849011
xmax = 2044.5748932104329
text = "E"
intervals [11]:
xmin = 2044.5748932104329
xmax = 2044.8329108578437
text = "m"
intervals [12]:
xmin = 2044.8329108578437
xmax = 2045.144149659864
text = "oU"
"""
demo_data2 = """File type = "ooTextFile"
Object class = "TextGrid"
0
2.8
<exists>
2
"IntervalTier"
"utterances"
0
2.8
3
0
1.6229213249309031
""
1.6229213249309031
2.341428074708195
"demo"
2.341428074708195
2.8
""
"IntervalTier"
"phones"
0
2.8
6
0
1.6229213249309031
""
1.6229213249309031
1.6428291382019483
"dc"
1.6428291382019483
1.65372183721983721
"d"
1.65372183721983721
1.94372874328943728
"E"
1.94372874328943728
2.13821938291038210
"m"
2.13821938291038210
2.341428074708195
"oU"
2.341428074708195
2.8
""
"""
demo_data3 = """"Praat chronological TextGrid text file"
0 2.8 ! Time domain.
2 ! Number of tiers.
"IntervalTier" "utterances" 0 2.8
"IntervalTier" "utterances" 0 2.8
1 0 1.6229213249309031
""
2 0 1.6229213249309031
""
2 1.6229213249309031 1.6428291382019483
"dc"
2 1.6428291382019483 1.65372183721983721
"d"
2 1.65372183721983721 1.94372874328943728
"E"
2 1.94372874328943728 2.13821938291038210
"m"
2 2.13821938291038210 2.341428074708195
"oU"
1 1.6229213249309031 2.341428074708195
"demo"
1 2.341428074708195 2.8
""
2 2.341428074708195 2.8
""
"""
if __name__ == "__main__":
demo()
| 30.104037 | 129 | 0.516429 |
import sys
import re
TEXTTIER = "TextTier"
INTERVALTIER = "IntervalTier"
OOTEXTFILE = re.compile(r"""(?x)
xmin\ =\ (.*)[\r\n]+
xmax\ =\ (.*)[\r\n]+
[\s\S]+?size\ =\ (.*)[\r\n]+
""")
CHRONTEXTFILE = re.compile(r"""(?x)
[\r\n]+(\S+)\
(\S+)\ +!\ Time\ domain.\ *[\r\n]+
(\S+)\ +!\ Number\ of\ tiers.\ *[\r\n]+"
""")
OLDOOTEXTFILE = re.compile(r"""(?x)
[\r\n]+(\S+)
[\r\n]+(\S+)
[\r\n]+.+[\r\n]+(\S+)
""")
#################################################################
# TextGrid Class
#################################################################
class TextGrid(object):
def __init__(self, read_file, config:dict=None):
# default creation of textgrid object
if config is None:
self.read_file = read_file
self.size = 0
self.xmin = 0
self.xmax = 0
self.t_time = 0
self.text_type = self._check_type()
self.tiers = self._find_tiers()
# creating a textgrid from a dict config
else:
self.read_file = None
self.size = config['size']
self.xmin = config['xmin']
self.xmax = config['xmax']
self.t_time = config['t_time']
def __iter__(self):
for tier in self.tiers:
yield tier
def next(self):
if self.idx == (self.size - 1):
raise StopIteration
self.idx += 1
return self.tiers[self.idx]
@staticmethod
def load(file):
return TextGrid(open(file).read())
def _load_tiers(self, header):
tiers = []
if self.text_type == "ChronTextFile":
m = re.compile(header)
tier_headers = m.findall(self.read_file)
tier_re = " \d+.?\d* \d+.?\d*[\r\n]+\"[^\"]*\""
for i in range(0, self.size):
tier_info = [tier_headers[i]] + \
re.findall(str(i + 1) + tier_re, self.read_file)
tier_info = "\n".join(tier_info)
tiers.append(Tier(tier_info, self.text_type, self.t_time))
return tiers
tier_re = header + "[\s\S]+?(?=" + header + "|$$)"
m = re.compile(tier_re)
tier_iter = m.finditer(self.read_file)
for iterator in tier_iter:
(begin, end) = iterator.span()
tier_info = self.read_file[begin:end]
tiers.append(Tier(tier_info, self.text_type, self.t_time))
return tiers
def _check_type(self):
m = re.match("(.*)[\r\n](.*)[\r\n](.*)[\r\n](.*)", self.read_file)
try:
type_id = m.group(1).strip()
except AttributeError:
raise TypeError("Cannot read file -- try TextGrid.load()")
xmin = m.group(4)
if type_id == "File type = \"ooTextFile\"":
if "xmin" not in xmin:
text_type = "OldooTextFile"
else:
text_type = "ooTextFile"
elif type_id == "\"Praat chronological TextGrid text file\"":
text_type = "ChronTextFile"
else:
raise TypeError("Unknown format '(%s)'", (type_id))
return text_type
def _find_tiers(self):
if self.text_type == "ooTextFile":
m = OOTEXTFILE
header = "\\t+item \["
elif self.text_type == "ChronTextFile":
m = CHRONTEXTFILE
header = "\"\S+\" \".*\" \d+\.?\d* \d+\.?\d*"
elif self.text_type == "OldooTextFile":
m = OLDOOTEXTFILE
header = "\".*\"[\r\n]+\".*\""
file_info = m.findall(self.read_file)[0]
self.xmin = float(file_info[0])
self.xmax = float(file_info[1])
self.t_time = self.xmax - self.xmin
self.size = int(file_info[2])
tiers = self._load_tiers(header)
return tiers
def to_chron(self):
chron_file = ""
chron_file += "\"Praat chronological TextGrid text file\"\n"
chron_file += str(self.xmin) + " " + str(self.xmax)
chron_file += " ! Time domain.\n"
chron_file += str(self.size) + " ! Number of tiers.\n"
for tier in self.tiers:
idx = (self.tiers.index(tier)) + 1
tier_header = "\"" + tier.classid + "\" \"" \
+ tier.nameid + "\" " + str(tier.xmin) \
+ " " + str(tier.xmax)
chron_file += tier_header + "\n"
transcript = tier.simple_transcript
for (xmin, xmax, utt) in transcript:
chron_file += str(idx) + " " + str(xmin)
chron_file += " " + str(xmax) +"\n"
chron_file += "\"" + utt + "\"\n"
return chron_file
def to_oo(self):
oo_file = ""
oo_file += "File type = \"ooTextFile\"\n"
oo_file += "Object class = \"TextGrid\"\n\n"
oo_file += "xmin = ", self.xmin, "\n"
oo_file += "xmax = ", self.xmax, "\n"
oo_file += "tiers? <exists>\n"
oo_file += "size = ", self.size, "\n"
oo_file += "item []:\n"
for i in range(len(self.tiers)):
oo_file += "%4s%s [%s]" % ("", "item", i + 1)
_curr_tier = self.tiers[i]
for (x, y) in _curr_tier.header:
oo_file += "%8s%s = \"%s\"" % ("", x, y)
if _curr_tier.classid != TEXTTIER:
for (xmin, xmax, text) in _curr_tier.simple_transcript:
oo_file += "%12s%s = %s" % ("", "xmin", xmin)
oo_file += "%12s%s = %s" % ("", "xmax", xmax)
oo_file += "%12s%s = \"%s\"" % ("", "text", text)
else:
for (time, mark) in _curr_tier.simple_transcript:
oo_file += "%12s%s = %s" % ("", "time", time)
oo_file += "%12s%s = %s" % ("", "mark", mark)
return oo_file
class Tier(object):
def __init__(self, tier, text_type, t_time, config=None):
if config is None:
self.tier = tier
self.text_type = text_type
self.t_time = t_time
self.classid = ""
self.nameid = ""
self.xmin = 0
self.xmax = 0
self.size = 0
self.transcript = ""
self.tier_info = ""
self._make_info()
self.simple_transcript = self.make_simple_transcript()
if self.classid != TEXTTIER:
self.mark_type = "intervals"
else:
self.mark_type = "points"
self.header = [("class", self.classid), ("name", self.nameid), \
("xmin", self.xmin), ("xmax", self.xmax), ("size", self.size)]
else:
pass
def __iter__(self):
return self
def _make_info(self):
trans = "([\S\s]*)"
if self.text_type == "ChronTextFile":
classid = "\"(.*)\" +"
nameid = "\"(.*)\" +"
xmin = "(\d+\.?\d*) +"
xmax = "(\d+\.?\d*) *[\r\n]+"
self.size = None
size = ""
elif self.text_type == "ooTextFile":
classid = "\t+class = \"(.*)\" *[\r\n]+"
nameid = "\t+name = \"(.*)\" *[\r\n]+"
xmin = "\t+xmin = (\d+\.?\d*) *[\r\n]+"
xmax = "\t+xmax = (\d+\.?\d*) *[\r\n]+"
size = "\t+\S+: size = (\d+) *[\r\n]+"
elif self.text_type == "OldooTextFile":
classid = "\"(.*)\" *[\r\n]+"
nameid = "\"(.*)\" *[\r\n]+"
xmin = "(\d+\.?\d*) *[\r\n]+"
xmax = "(\d+\.?\d*) *[\r\n]+"
size = "(\d+) *[\r\n]+"
m = re.compile(classid + nameid + xmin + xmax + size + trans)
self.tier_info = m.findall(self.tier)[0]
self.classid = self.tier_info[0]
self.nameid = self.tier_info[1]
self.xmin = float(self.tier_info[2])
self.xmax = float(self.tier_info[3])
if self.size != None:
self.size = int(self.tier_info[4])
self.transcript = self.tier_info[-1]
def make_simple_transcript(self):
if self.text_type == "ChronTextFile":
trans_head = ""
trans_xmin = " (\S+)"
trans_xmax = " (\S+)[\r\n]+"
trans_text = "\"([\S\s]*?)\""
elif self.text_type == "ooTextFile":
trans_head = "\\t+\S+ \[\d+\]: *[\r\n]+"
trans_xmin = "\\t+\S+ = (\S+) *[\r\n]+"
trans_xmax = "\\t+\S+ = (\S+) *[\r\n]+"
trans_text = "\\t+\S+ = \"([^\"]*?)\""
elif self.text_type == "OldooTextFile":
trans_head = ""
trans_xmin = "(.*)[\r\n]+"
trans_xmax = "(.*)[\r\n]+"
trans_text = "\"([\S\s]*?)\""
if self.classid == TEXTTIER:
trans_xmin = ""
trans_m = re.compile(trans_head + trans_xmin + trans_xmax + trans_text)
self.simple_transcript = trans_m.findall(self.transcript)
return self.simple_transcript
def transcript(self):
return self.transcript
def time(self, non_speech_char="."):
total = 0.0
if self.classid != TEXTTIER:
for (time1, time2, utt) in self.simple_transcript:
utt = utt.strip()
if utt and not utt[0] == ".":
total += (float(time2) - float(time1))
return total
def tier_name(self):
return self.nameid
def classid(self):
return self.classid
def min_max(self):
return (self.xmin, self.xmax)
def __repr__(self):
return "<%s \"%s\" (%.2f, %.2f) %.2f%%>" % (self.classid, self.nameid, self.xmin, self.xmax, 100*self.time()/self.t_time)
def __str__(self):
return self.__repr__() + "\n " + "\n ".join(" ".join(row) for row in self.simple_transcript)
def demo_TextGrid(demo_data):
print("** Demo of the TextGrid class. **")
fid = TextGrid(demo_data)
print("Tiers: %s" % (fid.size))
for i, tier in enumerate(fid):
print("\n***")
print("Tier: %s" % (i + 1))
print(tier)
def demo():
# Each demo demonstrates different TextGrid formats.
print("Format 1")
demo_TextGrid(demo_data1)
print("\nFormat 2")
demo_TextGrid(demo_data2)
print("\nFormat 3")
demo_TextGrid(demo_data3)
demo_data1 = """File type = "ooTextFile"
Object class = "TextGrid"
xmin = 0
xmax = 2045.144149659864
tiers? <exists>
size = 3
item []:
item [1]:
class = "IntervalTier"
name = "utterances"
xmin = 0
xmax = 2045.144149659864
intervals: size = 5
intervals [1]:
xmin = 0
xmax = 2041.4217474125382
text = ""
intervals [2]:
xmin = 2041.4217474125382
xmax = 2041.968276643991
text = "this"
intervals [3]:
xmin = 2041.968276643991
xmax = 2042.5281632653062
text = "is"
intervals [4]:
xmin = 2042.5281632653062
xmax = 2044.0487352585324
text = "a"
intervals [5]:
xmin = 2044.0487352585324
xmax = 2045.144149659864
text = "demo"
item [2]:
class = "TextTier"
name = "notes"
xmin = 0
xmax = 2045.144149659864
points: size = 3
points [1]:
time = 2041.4217474125382
mark = ".begin_demo"
points [2]:
time = 2043.8338291031832
mark = "voice gets quiet here"
points [3]:
time = 2045.144149659864
mark = ".end_demo"
item [3]:
class = "IntervalTier"
name = "phones"
xmin = 0
xmax = 2045.144149659864
intervals: size = 12
intervals [1]:
xmin = 0
xmax = 2041.4217474125382
text = ""
intervals [2]:
xmin = 2041.4217474125382
xmax = 2041.5438290324326
text = "D"
intervals [3]:
xmin = 2041.5438290324326
xmax = 2041.7321032910372
text = "I"
intervals [4]:
xmin = 2041.7321032910372
xmax = 2041.968276643991
text = "s"
intervals [5]:
xmin = 2041.968276643991
xmax = 2042.232189031843
text = "I"
intervals [6]:
xmin = 2042.232189031843
xmax = 2042.5281632653062
text = "z"
intervals [7]:
xmin = 2042.5281632653062
xmax = 2044.0487352585324
text = "eI"
intervals [8]:
xmin = 2044.0487352585324
xmax = 2044.2487352585324
text = "dc"
intervals [9]:
xmin = 2044.2487352585324
xmax = 2044.3102321849011
text = "d"
intervals [10]:
xmin = 2044.3102321849011
xmax = 2044.5748932104329
text = "E"
intervals [11]:
xmin = 2044.5748932104329
xmax = 2044.8329108578437
text = "m"
intervals [12]:
xmin = 2044.8329108578437
xmax = 2045.144149659864
text = "oU"
"""
demo_data2 = """File type = "ooTextFile"
Object class = "TextGrid"
0
2.8
<exists>
2
"IntervalTier"
"utterances"
0
2.8
3
0
1.6229213249309031
""
1.6229213249309031
2.341428074708195
"demo"
2.341428074708195
2.8
""
"IntervalTier"
"phones"
0
2.8
6
0
1.6229213249309031
""
1.6229213249309031
1.6428291382019483
"dc"
1.6428291382019483
1.65372183721983721
"d"
1.65372183721983721
1.94372874328943728
"E"
1.94372874328943728
2.13821938291038210
"m"
2.13821938291038210
2.341428074708195
"oU"
2.341428074708195
2.8
""
"""
demo_data3 = """"Praat chronological TextGrid text file"
0 2.8 ! Time domain.
2 ! Number of tiers.
"IntervalTier" "utterances" 0 2.8
"IntervalTier" "utterances" 0 2.8
1 0 1.6229213249309031
""
2 0 1.6229213249309031
""
2 1.6229213249309031 1.6428291382019483
"dc"
2 1.6428291382019483 1.65372183721983721
"d"
2 1.65372183721983721 1.94372874328943728
"E"
2 1.94372874328943728 2.13821938291038210
"m"
2 2.13821938291038210 2.341428074708195
"oU"
1 1.6229213249309031 2.341428074708195
"demo"
1 2.341428074708195 2.8
""
2 2.341428074708195 2.8
""
"""
if __name__ == "__main__":
demo()
| true | true |
1c49fb41d87bacb2885b8521921dc64905ec5e5d | 25,423 | py | Python | utils/data_generator.py | qiuqiangkong/dcase2019_task2 | 62575c8cdd4723cfdf497b290b6dddcce316c60b | [
"MIT"
] | 36 | 2019-04-13T02:04:04.000Z | 2020-10-27T15:54:24.000Z | utils/data_generator.py | qiuqiangkong/dcase2019_task2 | 62575c8cdd4723cfdf497b290b6dddcce316c60b | [
"MIT"
] | 2 | 2019-04-14T08:08:26.000Z | 2019-04-18T19:29:38.000Z | utils/data_generator.py | qiuqiangkong/dcase2019_task2 | 62575c8cdd4723cfdf497b290b6dddcce316c60b | [
"MIT"
] | 16 | 2019-04-13T23:01:32.000Z | 2021-01-10T05:20:15.000Z | import numpy as np
import h5py
import csv
import time
import logging
import os
import glob
import matplotlib.pyplot as plt
import logging
import pandas as pd
from utilities import scale
import config
class Base(object):
def __init__(self):
'''Base class for train, validate and test data generator.
'''
pass
def load_hdf5(self, hdf5_path, cross_validation_path):
'''Load hdf5 file.
Args:
hdf5_path: string, path of hdf5 file
          cross_validation_path: string | 'none', path of cross validation csv
file
Returns:
data_dict: {'audio_name': (audios_num,),
'feature': (dataset_total_frames, mel_bins),
'begin_index': (audios_num,),
'end_index': (audios_num,),
(if exist) 'target': (audios_num, classes_num),
(if exist) 'fold': (audios_num,)}
'''
data_dict = {}
with h5py.File(hdf5_path, 'r') as hf:
data_dict['audio_name'] = np.array(
[audio_name.decode() for audio_name in hf['audio_name'][:]])
data_dict['feature'] = hf['feature'][:].astype(np.float32)
data_dict['begin_index'] = hf['begin_index'][:].astype(np.int32)
data_dict['end_index'] = hf['end_index'][:].astype(np.int32)
if 'target' in hf.keys():
data_dict['target'] = hf['target'][:].astype(np.float32)
if cross_validation_path:
df = pd.read_csv(cross_validation_path, sep=',')
folds = []
for n, audio_name in enumerate(data_dict['audio_name']):
index = df.index[df['fname'] == audio_name][0]
folds.append(df['fold'][index])
data_dict['fold'] = np.array(folds)
return data_dict
def get_segment_metadata_dict(self, data_dict, audio_indexes,
segment_frames, hop_frames, source):
        '''Get segment metadata for training or inference. Long audio
        recordings are split into segments with the same duration. Each segment
        inherits the label of the audio recording.
Args:
data_dict: {'audio_name': (audios_num,),
'feature': (dataset_total_frames, mel_bins),
'begin_index': (audios_num,),
'end_index': (audios_num,),
(if exist) 'target': (audios_num, classes_num),
(if exist) 'fold': (audios_num,)}
audio_indexes: (audios_num,)
segment_frames: int, frames number of a segment
hop_frames: int, hop frames between segments
source: 'curated' | 'noisy' | None
Returns:
segment_metadata_dict: {'audio_name': (segments_num,),
'begin_index': (segments_num,),
'end_index': (segments_num,),
(if exist) 'target': (segments_num, classes_num),
(if exist) 'source': (segments_num)}
'''
segment_metadata_dict = {'audio_name': [], 'begin_index': [],
'end_index': []}
has_target = 'target' in data_dict.keys()
if has_target:
segment_metadata_dict['target'] = []
if source:
segment_metadata_dict['source'] = []
for audio_index in audio_indexes:
audio_name = data_dict['audio_name'][audio_index]
begin_index = data_dict['begin_index'][audio_index]
end_index = data_dict['end_index'][audio_index]
if has_target:
target = data_dict['target'][audio_index]
else:
target = None
# If audio recording shorter than a segment
if end_index - begin_index < segment_frames:
segment_metadata_dict['begin_index'].append(begin_index)
segment_metadata_dict['end_index'].append(end_index)
self._append_to_meta_data(segment_metadata_dict, audio_name,
target, source)
# If audio recording longer than a segment then split
else:
shift = 0
while end_index - (begin_index + shift) > segment_frames:
segment_metadata_dict['begin_index'].append(
begin_index + shift)
segment_metadata_dict['end_index'].append(
begin_index + shift + segment_frames)
self._append_to_meta_data(segment_metadata_dict,
audio_name, target, source)
shift += hop_frames
# Append the last segment
segment_metadata_dict['begin_index'].append(
end_index - segment_frames)
segment_metadata_dict['end_index'].append(end_index)
self._append_to_meta_data(segment_metadata_dict, audio_name,
target, source)
for key in segment_metadata_dict.keys():
segment_metadata_dict[key] = np.array(segment_metadata_dict[key])
return segment_metadata_dict
def _append_to_meta_data(self, segment_metadata_dict, audio_name, target,
source):
'''Append audio_name, target, source to segment_metadata_dict.
'''
segment_metadata_dict['audio_name'].append(audio_name)
if target is not None:
segment_metadata_dict['target'].append(target)
if source is not None:
segment_metadata_dict['source'].append(source)
def get_feature_mask(self, data_dict, begin_index, end_index,
segment_frames, pad_type, logmel_eps):
'''Get logmel feature and mask of one segment.
Args:
data_dict: {'audio_name': (audios_num,),
'feature': (dataset_total_frames, mel_bins),
'begin_index': (audios_num,),
'end_index': (audios_num,),
(if exist) 'target': (audios_num, classes_num),
(if exist) 'fold': (audios_num,)}
begin_index: int, begin index of a segment
end_index: int, end index of a segment
segment_frames: int, frames number of a segment
pad_type: string, 'constant' | 'repeat'
logmel_eps: constant value to pad if pad_type == 'constant'
'''
this_segment_frames = end_index - begin_index
        # If the segment frames of this audio are fewer than the designed segment
# frames, then pad.
if this_segment_frames < segment_frames:
if pad_type == 'constant':
this_feature = self.pad_constant(
data_dict['feature'][begin_index : end_index],
segment_frames, logmel_eps)
elif pad_type == 'repeat':
this_feature = self.pad_repeat(
data_dict['feature'][begin_index : end_index],
segment_frames)
this_mask = np.zeros(segment_frames)
this_mask[0 : this_segment_frames] = 1
# If segment frames is equal to the designed segment frames, then load
# data without padding.
else:
this_feature = data_dict['feature'][begin_index : end_index]
this_mask = np.ones(self.segment_frames)
return this_feature, this_mask
def pad_constant(self, x, max_len, constant):
'''Pad matrix with constant.
Args:
x: (frames, mel_bins)
          max_len: int, length to be padded
constant: float, value used for padding
'''
pad = constant * np.ones((max_len - x.shape[0], x.shape[1]))
padded_x = np.concatenate((x, pad), axis=0)
return padded_x
def pad_repeat(self, x, max_len):
        '''Repeat matrix to a length.
Args:
x: (frames, mel_bins)
max_len: int, length to be padded
'''
repeat_num = int(max_len / x.shape[0]) + 1
repeated_x = np.tile(x, (repeat_num, 1))
repeated_x = repeated_x[0 : max_len]
return repeated_x
def transform(self, x):
'''Transform data.
'''
return scale(x, self.scalar['mean'], self.scalar['std'])
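# Minimal usage sketch for the padding helpers above (illustrative only; assumes
# (frames, mel_bins) numpy arrays):
#
#     base = Base()
#     x = np.zeros((3, 4))                   # 3 frames, 4 mel bins
#     base.pad_constant(x, 5, -100.).shape   # (5, 4): two extra rows filled with -100.
#     base.pad_repeat(x, 5).shape            # (5, 4): x tiled, then truncated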
class DataGenerator(Base):
def __init__(self, curated_feature_hdf5_path, noisy_feature_hdf5_path,
curated_cross_validation_path, noisy_cross_validation_path, train_source,
holdout_fold, segment_seconds, hop_seconds, pad_type, scalar, batch_size,
seed=1234):
'''Data generator for training and validation.
Args:
curated_feature_hdf5_path: string, path of hdf5 file
noisy_feature_hdf5_path: string, path of hdf5 file
curated_cross_validation_path: path of cross validation csv file
noisy_cross_validation_path: path of cross validation csv file
train_source: 'curated' | 'noisy' | 'curated_and_noisy'
holdout_fold: '1', '2', '3', '4' | 'none', set `none` for training
on all data without validation
segment_seconds: float, duration of audio recordings to be padded or split
hop_seconds: float, hop seconds between segments
pad_type: 'constant' | 'repeat'
scalar: object, containing mean and std value
batch_size: int
seed: int
'''
self.scalar = scalar
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
self.segment_frames = int(segment_seconds * config.frames_per_second)
self.hop_frames = int(hop_seconds * config.frames_per_second)
self.pad_type = pad_type
self.logmel_eps = config.logmel_eps
# Load training data
load_time = time.time()
self.curated_data_dict = self.load_hdf5(
curated_feature_hdf5_path, curated_cross_validation_path)
self.noisy_data_dict = self.load_hdf5(
noisy_feature_hdf5_path, noisy_cross_validation_path)
# Get train and validate audio indexes
(train_curated_audio_indexes, validate_curated_audio_indexes) = \
self.get_train_validate_audio_indexes(
self.curated_data_dict, holdout_fold)
(train_noisy_audio_indexes, validate_noisy_audio_indexes) = \
self.get_train_validate_audio_indexes(
self.noisy_data_dict, holdout_fold)
logging.info('Train curated audio num: {}'.format(
len(train_curated_audio_indexes)))
logging.info('Train noisy audio num: {}'.format(
len(train_noisy_audio_indexes)))
logging.info('Validate curated audio num: {}'.format(
len(validate_curated_audio_indexes)))
logging.info('Validate noisy audio num: {}'.format(
len(validate_noisy_audio_indexes)))
logging.info('Load data time: {:.3f} s'.format(time.time() - load_time))
# Get segment metadata for training
self.train_curated_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.curated_data_dict, train_curated_audio_indexes,
self.segment_frames, self.hop_frames, 'curated')
self.train_noisy_segment_metadata_dict = self.get_segment_metadata_dict(
self.noisy_data_dict, train_noisy_audio_indexes,
self.segment_frames, self.hop_frames, 'noisy')
if train_source == 'curated':
self.train_segment_metadata_dict = \
self.train_curated_segment_metadata_dict
elif train_source == 'noisy':
self.train_segment_metadata_dict = \
self.train_noisy_segment_metadata_dict
elif train_source == 'curated_and_noisy':
self.train_segment_metadata_dict = \
self.combine_curated_noisy_metadata_dict(
self.train_curated_segment_metadata_dict,
self.train_noisy_segment_metadata_dict)
# Get segment metadata for validation
self.validate_curated_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.curated_data_dict, validate_curated_audio_indexes,
self.segment_frames, self.hop_frames, 'curated')
self.validate_noisy_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.noisy_data_dict, validate_noisy_audio_indexes,
self.segment_frames, self.hop_frames, 'noisy')
# Print data statistics
train_segments_num = len(self.train_segment_metadata_dict['audio_name'])
validate_curated_segments_num = len(
self.validate_curated_segment_metadata_dict['audio_name'])
validate_noisy_segments_num = len(
self.validate_noisy_segment_metadata_dict['audio_name'])
logging.info('')
logging.info('Total train segments num: {}'.format(train_segments_num))
logging.info('Validate curated segments num: {}'.format(
validate_curated_segments_num))
logging.info('Validate noisy segments num: {}'.format(
validate_noisy_segments_num))
self.train_segments_indexes = np.arange(train_segments_num)
self.random_state.shuffle(self.train_segments_indexes)
self.pointer = 0
def get_train_validate_audio_indexes(self, data_dict, holdout_fold):
'''Get train and validate audio indexes.
Args:
data_dict: {'audio_name': (audios_num,),
'feature': (dataset_total_frames, mel_bins),
'target': (audios_num, classes_num),
'begin_index': (audios_num,),
'end_index': (audios_num,),
(if exist) 'fold': (audios_num,)}
holdout_fold: 'none' | int, if 'none' then validate indexes are empty
Returns:
train_audio_indexes: (train_audios_num,)
validate_audio_indexes: (validate_audios_num)
'''
if holdout_fold == 'none':
train_audio_indexes = np.arange(len(data_dict['audio_name']))
validate_audio_indexes = np.array([])
else:
train_audio_indexes = np.where(
data_dict['fold'] != int(holdout_fold))[0]
validate_audio_indexes = np.where(
data_dict['fold'] == int(holdout_fold))[0]
return train_audio_indexes, validate_audio_indexes
def combine_curated_noisy_metadata_dict(self, curated_metadata_dict,
noisy_metadata_dict):
'''Combine curated and noisy segment metadata dict.
'''
combined_metadata_dict = {}
for key in curated_metadata_dict.keys():
combined_metadata_dict[key] = np.concatenate(
(curated_metadata_dict[key], noisy_metadata_dict[key]), axis=0)
return combined_metadata_dict
def generate_train(self):
'''Generate mini-batch data for training.
Returns:
batch_data_dict: {'audio_name': (batch_size,),
'feature': (batch_size, segment_frames, mel_bins),
'mask': (batch_size, segment_frames),
'target': (batch_size, classes_num),
'source': (batch_size,)}
'''
while True:
# Reset pointer
if self.pointer >= len(self.train_segments_indexes):
self.pointer = 0
self.random_state.shuffle(self.train_segments_indexes)
# Get batch segment indexes
batch_segment_indexes = self.train_segments_indexes[
self.pointer: self.pointer + self.batch_size]
self.pointer += self.batch_size
# Batch segment data
batch_audio_name = self.train_segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = self.train_segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = self.train_segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_target = self.train_segment_metadata_dict\
['target'][batch_segment_indexes]
batch_source = self.train_segment_metadata_dict\
['source'][batch_segment_indexes]
batch_feature = []
batch_mask = []
# Get logmel segments one by one, pad the short segments
for n in range(len(batch_segment_indexes)):
if batch_source[n] == 'curated':
data_dict = self.curated_data_dict
elif batch_source[n] == 'noisy':
data_dict = self.noisy_data_dict
else:
raise Exception('Incorrect source type!')
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask,
'target': batch_target,
'source': batch_source}
yield batch_data_dict
def generate_validate(self, data_type, target_source, max_iteration=None):
'''Generate mini-batch data for validation.
Returns:
batch_data_dict: {'audio_name': (batch_size,),
'feature': (batch_size, segment_frames, mel_bins),
'mask': (batch_size, segment_frames),
'target': (batch_size, classes_num)}
'''
assert(data_type in ['train', 'validate'])
assert(target_source in ['curated', 'noisy'])
segment_metadata_dict = eval(
'self.{}_{}_segment_metadata_dict'.format(data_type, target_source))
data_dict = eval('self.{}_data_dict'.format(target_source))
segments_num = len(segment_metadata_dict['audio_name'])
segment_indexes = np.arange(segments_num)
iteration = 0
pointer = 0
while True:
if iteration == max_iteration:
break
# Reset pointer
if pointer >= segments_num:
break
# Get batch segment indexes
batch_segment_indexes = segment_indexes[
pointer: pointer + self.batch_size]
pointer += self.batch_size
iteration += 1
# Batch segment data
batch_audio_name = segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_target = segment_metadata_dict\
['target'][batch_segment_indexes]
batch_feature = []
batch_mask = []
# Get logmel segments one by one, pad the short segments
for n in range(len(batch_segment_indexes)):
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask,
'target': batch_target}
yield batch_data_dict
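# Minimal training-loop sketch for DataGenerator (illustrative only; the paths,
# scalar object and hyperparameters below are placeholders, not values from this
# repository):
#
#     data_gen = DataGenerator(
#         curated_feature_hdf5_path, noisy_feature_hdf5_path,
#         curated_cross_validation_path, noisy_cross_validation_path,
#         train_source='curated_and_noisy', holdout_fold='1',
#         segment_seconds=2., hop_seconds=1., pad_type='repeat',
#         scalar=scalar, batch_size=32)
#     for batch_data_dict in data_gen.generate_train():
#         # batch_data_dict['feature']: (batch_size, segment_frames, mel_bins)
#         # batch_data_dict['target']:  (batch_size, classes_num)
#         ...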
class TestDataGenerator(Base):
def __init__(self, test_feature_hdf5_path, segment_seconds, hop_seconds,
pad_type, scalar, batch_size, seed=1234):
'''Data generator for testing.
Args:
test_feature_hdf5_path: string, path of hdf5 file
segment_seconds: float, duration of audio recordings to be padded or split
hop_seconds: float, hop seconds between segments
pad_type: 'constant' | 'repeat'
scalar: object, containing mean and std value
batch_size: int
seed: int
'''
self.scalar = scalar
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
self.segment_frames = int(segment_seconds * config.frames_per_second)
self.hop_frames = int(hop_seconds * config.frames_per_second)
self.pad_type = pad_type
self.logmel_eps = config.logmel_eps
# Load testing data
self.test_data_dict = self.load_hdf5(
test_feature_hdf5_path, cross_validation_path=None)
audios_num = len(self.test_data_dict['audio_name'])
test_audio_indexes = np.arange(audios_num)
self.test_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.test_data_dict, test_audio_indexes, self.segment_frames,
self.hop_frames, source=None)
def generate_test(self):
'''Generate mini-batch data for test.
Returns:
batch_data_dict: {'audio_name': (batch_size,),
'feature': (batch_size, segment_frames, mel_bins),
'mask': (batch_size, segment_frames)}
'''
segment_metadata_dict = self.test_segment_metadata_dict
data_dict = self.test_data_dict
segments_num = len(segment_metadata_dict['audio_name'])
segment_indexes = np.arange(segments_num)
iteration = 0
pointer = 0
while True:
# Reset pointer
if pointer >= segments_num:
break
# Get batch segment indexes
batch_segment_indexes = segment_indexes[
pointer: pointer + self.batch_size]
pointer += self.batch_size
iteration += 1
# Batch segment data
batch_audio_name = segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_feature = []
batch_mask = []
# Get logmel segments one by one, pad the short segments
for n in range(len(batch_segment_indexes)):
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask}
yield batch_data_dict | 39.476708 | 84 | 0.556858 | import numpy as np
import h5py
import csv
import time
import logging
import os
import glob
import matplotlib.pyplot as plt
import logging
import pandas as pd
from utilities import scale
import config
class Base(object):
def __init__(self):
pass
def load_hdf5(self, hdf5_path, cross_validation_path):
data_dict = {}
with h5py.File(hdf5_path, 'r') as hf:
data_dict['audio_name'] = np.array(
[audio_name.decode() for audio_name in hf['audio_name'][:]])
data_dict['feature'] = hf['feature'][:].astype(np.float32)
data_dict['begin_index'] = hf['begin_index'][:].astype(np.int32)
data_dict['end_index'] = hf['end_index'][:].astype(np.int32)
if 'target' in hf.keys():
data_dict['target'] = hf['target'][:].astype(np.float32)
if cross_validation_path:
df = pd.read_csv(cross_validation_path, sep=',')
folds = []
for n, audio_name in enumerate(data_dict['audio_name']):
index = df.index[df['fname'] == audio_name][0]
folds.append(df['fold'][index])
data_dict['fold'] = np.array(folds)
return data_dict
def get_segment_metadata_dict(self, data_dict, audio_indexes,
segment_frames, hop_frames, source):
segment_metadata_dict = {'audio_name': [], 'begin_index': [],
'end_index': []}
has_target = 'target' in data_dict.keys()
if has_target:
segment_metadata_dict['target'] = []
if source:
segment_metadata_dict['source'] = []
for audio_index in audio_indexes:
audio_name = data_dict['audio_name'][audio_index]
begin_index = data_dict['begin_index'][audio_index]
end_index = data_dict['end_index'][audio_index]
if has_target:
target = data_dict['target'][audio_index]
else:
target = None
if end_index - begin_index < segment_frames:
segment_metadata_dict['begin_index'].append(begin_index)
segment_metadata_dict['end_index'].append(end_index)
self._append_to_meta_data(segment_metadata_dict, audio_name,
target, source)
else:
shift = 0
while end_index - (begin_index + shift) > segment_frames:
segment_metadata_dict['begin_index'].append(
begin_index + shift)
segment_metadata_dict['end_index'].append(
begin_index + shift + segment_frames)
self._append_to_meta_data(segment_metadata_dict,
audio_name, target, source)
shift += hop_frames
segment_metadata_dict['begin_index'].append(
end_index - segment_frames)
segment_metadata_dict['end_index'].append(end_index)
self._append_to_meta_data(segment_metadata_dict, audio_name,
target, source)
for key in segment_metadata_dict.keys():
segment_metadata_dict[key] = np.array(segment_metadata_dict[key])
return segment_metadata_dict
def _append_to_meta_data(self, segment_metadata_dict, audio_name, target,
source):
segment_metadata_dict['audio_name'].append(audio_name)
if target is not None:
segment_metadata_dict['target'].append(target)
if source is not None:
segment_metadata_dict['source'].append(source)
def get_feature_mask(self, data_dict, begin_index, end_index,
segment_frames, pad_type, logmel_eps):
this_segment_frames = end_index - begin_index
if this_segment_frames < segment_frames:
if pad_type == 'constant':
this_feature = self.pad_constant(
data_dict['feature'][begin_index : end_index],
segment_frames, logmel_eps)
elif pad_type == 'repeat':
this_feature = self.pad_repeat(
data_dict['feature'][begin_index : end_index],
segment_frames)
this_mask = np.zeros(segment_frames)
this_mask[0 : this_segment_frames] = 1
else:
this_feature = data_dict['feature'][begin_index : end_index]
this_mask = np.ones(self.segment_frames)
return this_feature, this_mask
def pad_constant(self, x, max_len, constant):
pad = constant * np.ones((max_len - x.shape[0], x.shape[1]))
padded_x = np.concatenate((x, pad), axis=0)
return padded_x
def pad_repeat(self, x, max_len):
repeat_num = int(max_len / x.shape[0]) + 1
repeated_x = np.tile(x, (repeat_num, 1))
repeated_x = repeated_x[0 : max_len]
return repeated_x
def transform(self, x):
return scale(x, self.scalar['mean'], self.scalar['std'])
class DataGenerator(Base):
def __init__(self, curated_feature_hdf5_path, noisy_feature_hdf5_path,
curated_cross_validation_path, noisy_cross_validation_path, train_source,
holdout_fold, segment_seconds, hop_seconds, pad_type, scalar, batch_size,
seed=1234):
self.scalar = scalar
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
self.segment_frames = int(segment_seconds * config.frames_per_second)
self.hop_frames = int(hop_seconds * config.frames_per_second)
self.pad_type = pad_type
self.logmel_eps = config.logmel_eps
load_time = time.time()
self.curated_data_dict = self.load_hdf5(
curated_feature_hdf5_path, curated_cross_validation_path)
self.noisy_data_dict = self.load_hdf5(
noisy_feature_hdf5_path, noisy_cross_validation_path)
(train_curated_audio_indexes, validate_curated_audio_indexes) = \
self.get_train_validate_audio_indexes(
self.curated_data_dict, holdout_fold)
(train_noisy_audio_indexes, validate_noisy_audio_indexes) = \
self.get_train_validate_audio_indexes(
self.noisy_data_dict, holdout_fold)
logging.info('Train curated audio num: {}'.format(
len(train_curated_audio_indexes)))
logging.info('Train noisy audio num: {}'.format(
len(train_noisy_audio_indexes)))
logging.info('Validate curated audio num: {}'.format(
len(validate_curated_audio_indexes)))
logging.info('Validate noisy audio num: {}'.format(
len(validate_noisy_audio_indexes)))
logging.info('Load data time: {:.3f} s'.format(time.time() - load_time))
self.train_curated_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.curated_data_dict, train_curated_audio_indexes,
self.segment_frames, self.hop_frames, 'curated')
self.train_noisy_segment_metadata_dict = self.get_segment_metadata_dict(
self.noisy_data_dict, train_noisy_audio_indexes,
self.segment_frames, self.hop_frames, 'noisy')
if train_source == 'curated':
self.train_segment_metadata_dict = \
self.train_curated_segment_metadata_dict
elif train_source == 'noisy':
self.train_segment_metadata_dict = \
self.train_noisy_segment_metadata_dict
elif train_source == 'curated_and_noisy':
self.train_segment_metadata_dict = \
self.combine_curated_noisy_metadata_dict(
self.train_curated_segment_metadata_dict,
self.train_noisy_segment_metadata_dict)
self.validate_curated_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.curated_data_dict, validate_curated_audio_indexes,
self.segment_frames, self.hop_frames, 'curated')
self.validate_noisy_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.noisy_data_dict, validate_noisy_audio_indexes,
self.segment_frames, self.hop_frames, 'noisy')
train_segments_num = len(self.train_segment_metadata_dict['audio_name'])
validate_curated_segments_num = len(
self.validate_curated_segment_metadata_dict['audio_name'])
validate_noisy_segments_num = len(
self.validate_noisy_segment_metadata_dict['audio_name'])
logging.info('')
logging.info('Total train segments num: {}'.format(train_segments_num))
logging.info('Validate curated segments num: {}'.format(
validate_curated_segments_num))
logging.info('Validate noisy segments num: {}'.format(
validate_noisy_segments_num))
self.train_segments_indexes = np.arange(train_segments_num)
self.random_state.shuffle(self.train_segments_indexes)
self.pointer = 0
def get_train_validate_audio_indexes(self, data_dict, holdout_fold):
if holdout_fold == 'none':
train_audio_indexes = np.arange(len(data_dict['audio_name']))
validate_audio_indexes = np.array([])
else:
train_audio_indexes = np.where(
data_dict['fold'] != int(holdout_fold))[0]
validate_audio_indexes = np.where(
data_dict['fold'] == int(holdout_fold))[0]
return train_audio_indexes, validate_audio_indexes
def combine_curated_noisy_metadata_dict(self, curated_metadata_dict,
noisy_metadata_dict):
combined_metadata_dict = {}
for key in curated_metadata_dict.keys():
combined_metadata_dict[key] = np.concatenate(
(curated_metadata_dict[key], noisy_metadata_dict[key]), axis=0)
return combined_metadata_dict
def generate_train(self):
while True:
if self.pointer >= len(self.train_segments_indexes):
self.pointer = 0
self.random_state.shuffle(self.train_segments_indexes)
batch_segment_indexes = self.train_segments_indexes[
self.pointer: self.pointer + self.batch_size]
self.pointer += self.batch_size
batch_audio_name = self.train_segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = self.train_segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = self.train_segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_target = self.train_segment_metadata_dict\
['target'][batch_segment_indexes]
batch_source = self.train_segment_metadata_dict\
['source'][batch_segment_indexes]
batch_feature = []
batch_mask = []
for n in range(len(batch_segment_indexes)):
if batch_source[n] == 'curated':
data_dict = self.curated_data_dict
elif batch_source[n] == 'noisy':
data_dict = self.noisy_data_dict
else:
raise Exception('Incorrect source type!')
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask,
'target': batch_target,
'source': batch_source}
yield batch_data_dict
def generate_validate(self, data_type, target_source, max_iteration=None):
assert(data_type in ['train', 'validate'])
assert(target_source in ['curated', 'noisy'])
segment_metadata_dict = eval(
'self.{}_{}_segment_metadata_dict'.format(data_type, target_source))
data_dict = eval('self.{}_data_dict'.format(target_source))
segments_num = len(segment_metadata_dict['audio_name'])
segment_indexes = np.arange(segments_num)
iteration = 0
pointer = 0
while True:
if iteration == max_iteration:
break
if pointer >= segments_num:
break
batch_segment_indexes = segment_indexes[
pointer: pointer + self.batch_size]
pointer += self.batch_size
iteration += 1
batch_audio_name = segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_target = segment_metadata_dict\
['target'][batch_segment_indexes]
batch_feature = []
batch_mask = []
for n in range(len(batch_segment_indexes)):
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask,
'target': batch_target}
yield batch_data_dict
class TestDataGenerator(Base):
def __init__(self, test_feature_hdf5_path, segment_seconds, hop_seconds,
pad_type, scalar, batch_size, seed=1234):
self.scalar = scalar
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
self.segment_frames = int(segment_seconds * config.frames_per_second)
self.hop_frames = int(hop_seconds * config.frames_per_second)
self.pad_type = pad_type
self.logmel_eps = config.logmel_eps
self.test_data_dict = self.load_hdf5(
test_feature_hdf5_path, cross_validation_path=None)
audios_num = len(self.test_data_dict['audio_name'])
test_audio_indexes = np.arange(audios_num)
self.test_segment_metadata_dict = \
self.get_segment_metadata_dict(
self.test_data_dict, test_audio_indexes, self.segment_frames,
self.hop_frames, source=None)
def generate_test(self):
segment_metadata_dict = self.test_segment_metadata_dict
data_dict = self.test_data_dict
segments_num = len(segment_metadata_dict['audio_name'])
segment_indexes = np.arange(segments_num)
iteration = 0
pointer = 0
while True:
if pointer >= segments_num:
break
batch_segment_indexes = segment_indexes[
pointer: pointer + self.batch_size]
pointer += self.batch_size
iteration += 1
batch_audio_name = segment_metadata_dict\
['audio_name'][batch_segment_indexes]
batch_begin_index = segment_metadata_dict\
['begin_index'][batch_segment_indexes]
batch_end_index = segment_metadata_dict\
['end_index'][batch_segment_indexes]
batch_feature = []
batch_mask = []
for n in range(len(batch_segment_indexes)):
(this_feature, this_mask) = self.get_feature_mask(
data_dict, batch_begin_index[n], batch_end_index[n],
self.segment_frames, self.pad_type, self.logmel_eps)
batch_feature.append(this_feature)
batch_mask.append(this_mask)
batch_feature = np.array(batch_feature)
batch_feature = self.transform(batch_feature)
batch_mask = np.array(batch_mask)
batch_data_dict = {
'audio_name': batch_audio_name,
'feature': batch_feature,
'mask': batch_mask}
yield batch_data_dict | true | true |
1c49fb45dc43ea2b8aafb011957f27effda703a7 | 1,190 | py | Python | all_tests/it10.py | shushantkumar/ci_edit_final | 3b13c7a39b2112ed8daaa70bc4f0f50d67909494 | [
"Apache-2.0"
] | null | null | null | all_tests/it10.py | shushantkumar/ci_edit_final | 3b13c7a39b2112ed8daaa70bc4f0f50d67909494 | [
"Apache-2.0"
] | null | null | null | all_tests/it10.py | shushantkumar/ci_edit_final | 3b13c7a39b2112ed8daaa70bc4f0f50d67909494 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from app.curses_util import *
import app.fake_curses_testing
class it10(app.fake_curses_testing.FakeCursesTestCase):
def setUp(self):
app.fake_curses_testing.FakeCursesTestCase.setUp(self)
def test10(self):
#self.setMovieMode(True)
lineLimitIndicator = self.prg.prefs.editor['lineLimitIndicator']
self.prg.prefs.editor['lineLimitIndicator'] = 10
self.runWithFakeInputs([
self.displayCheck(2, 7, [u" "]),
self.writeText(u"A line with numbers 1234567890"),
self.displayCheck(2, 7, [u"A line with numbers 1234567890"]),
self.writeText(u". Writing"),
self.displayCheck(2, 7, [u"ith numbers 1234567890. Writing"]),
self.writeText(u" some more."),
self.displayCheck(2, 7, [u" 1234567890. Writing some more."]),
self.writeText(u"\n"),
self.displayCheck(2, 7, [u"A line with numbers 1234567890."]),
CTRL_Q, u"n"
])
self.prg.prefs.editor['lineLimitIndicator'] = lineLimitIndicator | 38.387097 | 74 | 0.653782 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from app.curses_util import *
import app.fake_curses_testing
class it10(app.fake_curses_testing.FakeCursesTestCase):
def setUp(self):
app.fake_curses_testing.FakeCursesTestCase.setUp(self)
def test10(self):
lineLimitIndicator = self.prg.prefs.editor['lineLimitIndicator']
self.prg.prefs.editor['lineLimitIndicator'] = 10
self.runWithFakeInputs([
self.displayCheck(2, 7, [u" "]),
self.writeText(u"A line with numbers 1234567890"),
self.displayCheck(2, 7, [u"A line with numbers 1234567890"]),
self.writeText(u". Writing"),
self.displayCheck(2, 7, [u"ith numbers 1234567890. Writing"]),
self.writeText(u" some more."),
self.displayCheck(2, 7, [u" 1234567890. Writing some more."]),
self.writeText(u"\n"),
self.displayCheck(2, 7, [u"A line with numbers 1234567890."]),
CTRL_Q, u"n"
])
self.prg.prefs.editor['lineLimitIndicator'] = lineLimitIndicator | true | true |
1c49fb62426836b9756f2971c833979c6b552fae | 2,657 | py | Python | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | tests/test_cdtw.py | dizcza/cdtw-python | a83fffd6fc222a1691f07421fd4dbf46dc19e0aa | [
"MIT"
] | null | null | null | import unittest
import math
import numpy as np
from cdtw.dtw import *
from numpy.testing import assert_array_equal, assert_array_almost_equal
try:
import dtaidistance
DTAIDISTANCE_INSTALLED = True
except ImportError:
DTAIDISTANCE_INSTALLED = False
class TestCDTW(unittest.TestCase):
def test_empty(self):
self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0])
self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0])
def test_one_point(self):
self.assertEqual(dtw_dist([1.0], [5.0]), 4.0)
cost_mat = dtw_mat([1.0], [5.0])
assert_array_equal(cost_mat, [[4.0]])
assert_array_equal(dtw_path(cost_mat), [(0, 0)])
def test_simple(self):
x = [1, 2, 3, 4, 5]
y = [2, 3, 4]
cost_mat_expected = np.sqrt([
[1, 5, 14],
[1, 2, 6],
[2, 1, 2],
[6, 2, 1],
[15, 6, 2]
])
path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)]
cost_mat = dtw_mat(x, y)
self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6)
assert_array_almost_equal(cost_mat, cost_mat_expected)
assert_array_equal(dtw_path(cost_mat), path_expected)
def test_order_does_not_matter(self):
np.random.seed(0)
x = np.random.randn(100)
y = np.random.randn(300)
assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T)
self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x))
def test_dtw_distance_path(self):
np.random.seed(0)
x = np.random.randn(10)
y = np.random.randn(30)
cost_mat = dtw_mat(x, y)
self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6)
path = dtw_path(cost_mat)
assert_array_equal(path[0], (0, 0))
assert_array_equal(path[-1], (len(x) - 1, len(y) - 1))
@unittest.skipUnless(DTAIDISTANCE_INSTALLED, "dtaidistance not installed")
def test_dtaidistance(self):
np.random.seed(0)
x = np.random.randn(100).astype(np.float32)
y = np.random.randn(30).astype(np.float32)
self.assertAlmostEqual(dtw_dist(x, y),
dtaidistance.dtw.distance(x, y),
places=6)
_, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y)
cost_mat = dtw_mat(x, y)
assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:],
decimal=5)
path_expected = dtaidistance.dtw.best_path(cost_mat_expected)
assert_array_equal(dtw_path(cost_mat), path_expected)
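# Minimal end-to-end sketch of the API exercised by the tests above
# (illustrative only):
#
#     cost = dtw_mat([1., 2., 3.], [2., 3.])   # accumulated-cost matrix
#     dist = dtw_dist([1., 2., 3.], [2., 3.])  # equals cost[-1, -1]
#     path = dtw_path(cost)                    # starts at (0, 0), ends at (2, 1)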
if __name__ == '__main__':
unittest.main()
| 33.2125 | 78 | 0.596161 | import unittest
import math
import numpy as np
from cdtw.dtw import *
from numpy.testing import assert_array_equal, assert_array_almost_equal
try:
import dtaidistance
DTAIDISTANCE_INSTALLED = True
except ImportError:
DTAIDISTANCE_INSTALLED = False
class TestCDTW(unittest.TestCase):
def test_empty(self):
self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0])
self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0])
def test_one_point(self):
self.assertEqual(dtw_dist([1.0], [5.0]), 4.0)
cost_mat = dtw_mat([1.0], [5.0])
assert_array_equal(cost_mat, [[4.0]])
assert_array_equal(dtw_path(cost_mat), [(0, 0)])
def test_simple(self):
x = [1, 2, 3, 4, 5]
y = [2, 3, 4]
cost_mat_expected = np.sqrt([
[1, 5, 14],
[1, 2, 6],
[2, 1, 2],
[6, 2, 1],
[15, 6, 2]
])
path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)]
cost_mat = dtw_mat(x, y)
self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6)
assert_array_almost_equal(cost_mat, cost_mat_expected)
assert_array_equal(dtw_path(cost_mat), path_expected)
def test_order_does_not_matter(self):
np.random.seed(0)
x = np.random.randn(100)
y = np.random.randn(300)
assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T)
self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x))
def test_dtw_distance_path(self):
np.random.seed(0)
x = np.random.randn(10)
y = np.random.randn(30)
cost_mat = dtw_mat(x, y)
self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6)
path = dtw_path(cost_mat)
assert_array_equal(path[0], (0, 0))
assert_array_equal(path[-1], (len(x) - 1, len(y) - 1))
@unittest.skipUnless(DTAIDISTANCE_INSTALLED, "dtaidistance not installed")
def test_dtaidistance(self):
np.random.seed(0)
x = np.random.randn(100).astype(np.float32)
y = np.random.randn(30).astype(np.float32)
self.assertAlmostEqual(dtw_dist(x, y),
dtaidistance.dtw.distance(x, y),
places=6)
_, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y)
cost_mat = dtw_mat(x, y)
assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:],
decimal=5)
path_expected = dtaidistance.dtw.best_path(cost_mat_expected)
assert_array_equal(dtw_path(cost_mat), path_expected)
if __name__ == '__main__':
unittest.main()
| true | true |
1c49fd605d161c98160f7a93f5883987a5cf6858 | 2,476 | py | Python | samsungctl/interactive.py | jakubpas/samsungctl | adda11c55038e2a3d057edf515ee44a2fd950949 | [
"MIT"
] | 4 | 2021-03-01T01:49:23.000Z | 2022-02-08T16:18:37.000Z | samsungctl/interactive.py | jakubpas/samsungctl | adda11c55038e2a3d057edf515ee44a2fd950949 | [
"MIT"
] | null | null | null | samsungctl/interactive.py | jakubpas/samsungctl | adda11c55038e2a3d057edf515ee44a2fd950949 | [
"MIT"
] | 1 | 2021-06-23T20:42:05.000Z | 2021-06-23T20:42:05.000Z | import curses
_wake_on_lan = '44:5C:E9:51:C8:29'
_mappings = [
["p", "KEY_POWER", "P", "Power off"],
["h", "KEY_HOME", "H", "Home"],
["KEY_UP", "KEY_UP", "Up", "Up"],
["KEY_DOWN", "KEY_DOWN", "Down", "Down"],
["KEY_LEFT", "KEY_LEFT", "Left", "Left"],
["KEY_RIGHT", "KEY_RIGHT", "Right", "Right"],
["\n", "KEY_ENTER", "Enter", "Enter"],
["KEY_BACKSPACE", "KEY_RETURN", "Backspace", "Return"],
["e", "KEY_EXIT", "E", "Exit"],
[" ", "KEY_PLAY", "Space", "Play/Pause"],
["m", "KEY_MENU", "M", "Menu"],
["s", "KEY_SOURCE", "S", "Source"],
["+", "KEY_VOLUP", "+", "Volume Up"],
["-", "KEY_VOLDOWN", "-", "Volume Down"],
["*", "KEY_MUTE", "*", "Mute"],
["s", "KEY_HDMI", "S", "HDMI Source"],
["i", "KEY_INFO", "I", "Info"],
["n", "KEY_MORE", "D", "Numbers"],
]
def run(remote):
"""Run interactive remote control application."""
curses.wrapper(_control, remote)
def _control(std_scr, remote):
height, width = std_scr.getmaxyx()
std_scr.addstr("Interactive mode, press 'Q' to exit.\n")
std_scr.addstr("Key mappings:\n")
column_len = max(len(mapping[2]) for mapping in _mappings) + 1
mappings_dict = {}
for mapping in _mappings:
mappings_dict[mapping[0]] = mapping[1]
row = std_scr.getyx()[0] + 2
if row < height:
line = " {}= {} ({})\n".format(mapping[2].ljust(column_len),
mapping[3], mapping[1])
std_scr.addstr(line)
elif row == height:
std_scr.addstr("[Terminal is too small to show all keys]\n")
running = True
while running:
key = std_scr.getkey()
if key == "q":
running = False
if key in mappings_dict:
remote.control(mappings_dict[key])
try:
std_scr.addstr(".")
except curses.error:
std_scr.deleteln()
std_scr.move(std_scr.getyx()[0], 0)
std_scr.addstr(".")
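# Minimal usage sketch (illustrative only): `remote` is any object exposing a
# `control(key)` method, e.g. a samsungctl Remote configured elsewhere.
#
#     import samsungctl
#     from samsungctl import interactive
#     with samsungctl.Remote(config) as remote:   # `config` assumed to be defined
#         interactive.run(remote)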
| 37.515152 | 74 | 0.421648 | import curses
_wake_on_lan = '44:5C:E9:51:C8:29'
_mappings = [
["p", "KEY_POWER", "P", "Power off"],
["h", "KEY_HOME", "H", "Home"],
["KEY_UP", "KEY_UP", "Up", "Up"],
["KEY_DOWN", "KEY_DOWN", "Down", "Down"],
["KEY_LEFT", "KEY_LEFT", "Left", "Left"],
["KEY_RIGHT", "KEY_RIGHT", "Right", "Right"],
["\n", "KEY_ENTER", "Enter", "Enter"],
["KEY_BACKSPACE", "KEY_RETURN", "Backspace", "Return"],
["e", "KEY_EXIT", "E", "Exit"],
[" ", "KEY_PLAY", "Space", "Play/Pause"],
["m", "KEY_MENU", "M", "Menu"],
["s", "KEY_SOURCE", "S", "Source"],
["+", "KEY_VOLUP", "+", "Volume Up"],
["-", "KEY_VOLDOWN", "-", "Volume Down"],
["*", "KEY_MUTE", "*", "Mute"],
["s", "KEY_HDMI", "S", "HDMI Source"],
["i", "KEY_INFO", "I", "Info"],
["n", "KEY_MORE", "D", "Numbers"],
]
def run(remote):
curses.wrapper(_control, remote)
def _control(std_scr, remote):
height, width = std_scr.getmaxyx()
std_scr.addstr("Interactive mode, press 'Q' to exit.\n")
std_scr.addstr("Key mappings:\n")
column_len = max(len(mapping[2]) for mapping in _mappings) + 1
mappings_dict = {}
for mapping in _mappings:
mappings_dict[mapping[0]] = mapping[1]
row = std_scr.getyx()[0] + 2
if row < height:
line = " {}= {} ({})\n".format(mapping[2].ljust(column_len),
mapping[3], mapping[1])
std_scr.addstr(line)
elif row == height:
std_scr.addstr("[Terminal is too small to show all keys]\n")
running = True
while running:
key = std_scr.getkey()
if key == "q":
running = False
if key in mappings_dict:
remote.control(mappings_dict[key])
try:
std_scr.addstr(".")
except curses.error:
std_scr.deleteln()
std_scr.move(std_scr.getyx()[0], 0)
std_scr.addstr(".")
| true | true |
1c49fd615acaec331d8b1875e666820618016ac8 | 4,988 | py | Python | attention.py | huajianjiu/ANSMESC | 76323a46f638c717e23388cf529734081a70eeee | [
"Apache-2.0"
] | 1 | 2021-08-09T03:45:36.000Z | 2021-08-09T03:45:36.000Z | attention.py | huajianjiu/ANSMESC | 76323a46f638c717e23388cf529734081a70eeee | [
"Apache-2.0"
] | 2 | 2021-08-09T07:40:19.000Z | 2021-08-10T12:34:04.000Z | attention.py | yuanzhiKe/ANSMESC | 76323a46f638c717e23388cf529734081a70eeee | [
"Apache-2.0"
] | null | null | null | # author - Richard Liao
# Dec 26 2016
# Attention GRU network
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
class AttentionWithContext(Layer):
"""
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
:param kwargs:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Example:
model.add(LSTM(64, return_sequences=True))
model.add(AttentionWithContext())
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight(shape=(input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
uit = K.dot(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
# ait = K.dot(uit, self.u) # replace this
mul_a = uit * self.u # with this
ait = K.sum(mul_a, axis=2) # and this
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
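# Sketch of the computation performed in call() above (cf. Yang et al., 2016):
#   u_it = tanh(W h_it + b)                              -> uit
#   a_it = softmax_t(u_it . u)  (masked, re-normalised)  -> a
#   s_i  = sum_t a_it * h_it                             -> the returned vector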
if __name__ == "__main__":
from keras.models import Model
from keras.layers import Input, Embedding, Bidirectional, TimeDistributed, GRU, Dense
import numpy as np
input_array = np.random.randint(25, size=(15, 100))
embedding_layer = Embedding(25 + 1,
100,
input_length=100,
trainable=True)
sentence_input = Input(shape=(100,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttentionWithContext()(l_dense)
model = Model(sentence_input, l_att)
# model = Model(sentence_input, l_dense)
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print(output_array.shape)
| 39.587302 | 102 | 0.594226 |
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
class AttentionWithContext(Layer):
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight(shape=(input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
return None
def call(self, x, mask=None):
uit = K.dot(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
        mul_a = uit * self.u
        ait = K.sum(mul_a, axis=2)
a = K.exp(ait)
if mask is not None:
a *= K.cast(mask, K.floatx())
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
if __name__ == "__main__":
from keras.models import Model
from keras.layers import Input, Embedding, Bidirectional, TimeDistributed, GRU, Dense
import numpy as np
input_array = np.random.randint(25, size=(15, 100))
embedding_layer = Embedding(25 + 1,
100,
input_length=100,
trainable=True)
sentence_input = Input(shape=(100,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttentionWithContext()(l_dense)
model = Model(sentence_input, l_att)
# model = Model(sentence_input, l_dense)
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print(output_array.shape)
| true | true |
1c49fdbc6de8e0fa0905400b281525d8cbffcdac | 2,739 | py | Python | tests/test_client.py | nicolaskenner/jira | 5c27f6ddafffc6110be1db4749fa67025852bcb6 | [
"BSD-2-Clause"
] | 1 | 2021-03-04T08:06:37.000Z | 2021-03-04T08:06:37.000Z | tests/test_client.py | nicolaskenner/jira | 5c27f6ddafffc6110be1db4749fa67025852bcb6 | [
"BSD-2-Clause"
] | 1 | 2020-08-25T15:50:27.000Z | 2020-08-25T15:50:27.000Z | tests/test_client.py | nicolaskenner/jira | 5c27f6ddafffc6110be1db4749fa67025852bcb6 | [
"BSD-2-Clause"
] | 1 | 2022-01-18T20:17:48.000Z | 2022-01-18T20:17:48.000Z | # -*- coding: utf-8 -*-
import getpass
import pytest
# from tenacity import retry
# from tenacity import wait_incrementing
from tests import get_unique_project_name
from tests import JiraTestManager
from jira import Role, Issue, JIRA, JIRAError, Project # noqa
import jira.client
@pytest.fixture()
def prep():
pass
@pytest.fixture(scope="module")
def test_manager():
return JiraTestManager()
@pytest.fixture()
def cl_admin(test_manager):
return test_manager.jira_admin
@pytest.fixture()
def cl_normal(test_manager):
return test_manager.jira_normal
@pytest.fixture(scope="function")
def slug(request, cl_admin):
def remove_by_slug():
try:
cl_admin.delete_project(slug)
except (ValueError, JIRAError):
# Some tests have project already removed, so we stay silent
pass
slug = get_unique_project_name()
project_name = "Test user=%s key=%s A" % (getpass.getuser(), slug)
try:
proj = cl_admin.project(slug)
except JIRAError:
proj = cl_admin.create_project(slug, project_name)
assert proj
request.addfinalizer(remove_by_slug)
return slug
def test_delete_project(cl_admin, cl_normal, slug):
assert cl_admin.delete_project(slug)
def test_delete_inexistent_project(cl_admin):
slug = "abogus123"
with pytest.raises(JIRAError) as ex:
assert cl_admin.delete_project(slug)
assert "No project could be found with key" in str(
ex.value
) or 'Parameter pid="%s" is not a Project, projectID or slug' % slug in str(
ex.value
)
def test_templates(cl_admin):
templates = cl_admin.templates()
expected_templates = set(
filter(
None,
"""
Agility
Basic
Bug tracking
Content Management
Customer service
Document Approval
IT Service Desk
Kanban software development
Lead Tracking
Process management
Procurement
Project management
Recruitment
Scrum software development
Task management
""".split(
"\n"
),
)
)
for t in expected_templates:
assert t in templates
def test_result_list():
iterable = [2, 3]
startAt = 0
maxResults = 50
total = 2
results = jira.client.ResultList(iterable, startAt, maxResults, total)
for idx, result in enumerate(results):
assert results[idx] == iterable[idx]
assert next(results) == iterable[0]
assert next(results) == iterable[1]
with pytest.raises(StopIteration):
next(results)
def test_result_list_if_empty():
results = jira.client.ResultList()
for r in results:
raise AssertionError("`results` should be empty")
with pytest.raises(StopIteration):
next(results)
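# As the two tests above demonstrate, jira.client.ResultList wraps one page of
# results: it supports indexing like the underlying iterable, yields items via
# next(), and raises StopIteration once the page (described by startAt,
# maxResults and total) is exhausted.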
| 20.75 | 80 | 0.68054 | import getpass
import pytest
from tests import get_unique_project_name
from tests import JiraTestManager
from jira import Role, Issue, JIRA, JIRAError, Project
import jira.client
@pytest.fixture()
def prep():
pass
@pytest.fixture(scope="module")
def test_manager():
return JiraTestManager()
@pytest.fixture()
def cl_admin(test_manager):
return test_manager.jira_admin
@pytest.fixture()
def cl_normal(test_manager):
return test_manager.jira_normal
@pytest.fixture(scope="function")
def slug(request, cl_admin):
def remove_by_slug():
try:
cl_admin.delete_project(slug)
except (ValueError, JIRAError):
pass
slug = get_unique_project_name()
project_name = "Test user=%s key=%s A" % (getpass.getuser(), slug)
try:
proj = cl_admin.project(slug)
except JIRAError:
proj = cl_admin.create_project(slug, project_name)
assert proj
request.addfinalizer(remove_by_slug)
return slug
def test_delete_project(cl_admin, cl_normal, slug):
assert cl_admin.delete_project(slug)
def test_delete_inexistent_project(cl_admin):
slug = "abogus123"
with pytest.raises(JIRAError) as ex:
assert cl_admin.delete_project(slug)
assert "No project could be found with key" in str(
ex.value
) or 'Parameter pid="%s" is not a Project, projectID or slug' % slug in str(
ex.value
)
def test_templates(cl_admin):
templates = cl_admin.templates()
expected_templates = set(
filter(
None,
"""
Agility
Basic
Bug tracking
Content Management
Customer service
Document Approval
IT Service Desk
Kanban software development
Lead Tracking
Process management
Procurement
Project management
Recruitment
Scrum software development
Task management
""".split(
"\n"
),
)
)
for t in expected_templates:
assert t in templates
def test_result_list():
iterable = [2, 3]
startAt = 0
maxResults = 50
total = 2
results = jira.client.ResultList(iterable, startAt, maxResults, total)
for idx, result in enumerate(results):
assert results[idx] == iterable[idx]
assert next(results) == iterable[0]
assert next(results) == iterable[1]
with pytest.raises(StopIteration):
next(results)
def test_result_list_if_empty():
results = jira.client.ResultList()
for r in results:
raise AssertionError("`results` should be empty")
with pytest.raises(StopIteration):
next(results)
| true | true |
1c49fdc0256ccd65c716e03f0e803a5cd3cf8ffb | 2,151 | py | Python | google_oauth/__init__.py | martialo12/flask-google-login | 592043ed8cf8fddcaab7536c1911d654013b5e4f | [
"MIT"
] | null | null | null | google_oauth/__init__.py | martialo12/flask-google-login | 592043ed8cf8fddcaab7536c1911d654013b5e4f | [
"MIT"
] | null | null | null | google_oauth/__init__.py | martialo12/flask-google-login | 592043ed8cf8fddcaab7536c1911d654013b5e4f | [
"MIT"
] | null | null | null | # python standard libraries
from pathlib import Path
import logging.config
# third party libraries
from flask import Flask
from flask_login import LoginManager
from oauthlib.oauth2 import WebApplicationClient
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
import yaml
# logging
path_to_config_file = Path(__file__).parent / "config/config.yaml"
logging.config.fileConfig(path_to_config_file, disable_existing_loggers=False)
# create logger
logger = logging.getLogger("flaskapp")
with open(rf"{path_to_config_file}") as cfgfile:
logger.info(f"loading configuration from config {path_to_config_file}")
config = yaml.load(cfgfile, Loader=yaml.FullLoader)
flask_app_conf = config["FLASK_APP_CONFIGURATION"]
google_conf = config["GOOGLE_CONFIG"]
db_conf = config["DB_CONFIG"]
logger.info(f"=========Flask app Config========\n")
logger.debug(f"{flask_app_conf}")
logger.info(f"=========Google Config========\n")
logger.debug(f"{google_conf}")
logger.info(f"=========db Config========\n")
logger.debug(f"{db_conf}")
# flask app config
secret_key = flask_app_conf["secret_key"]
port = flask_app_conf["port"]
debug = flask_app_conf["debug"]
host = flask_app_conf["host"]
# google config
google_discovery_url = google_conf["google_discovery_url"]
google_redirect_uri = google_conf["google_redirect_uri"]
google_client_id = google_conf["google_client_id"]
google_client_secret = google_conf["google_client_secret"]
# db conf
SQLALCHEMY_DATABASE_URI = db_conf["SQLALCHEMY_DATABASE_URI"]
SQLALCHEMY_TRACK_MODIFICATIONS = db_conf["SQLALCHEMY_TRACK_MODIFICATIONS"]
# create flask app
app = Flask(__name__)
app.config["SECRET_KEY"] = secret_key
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
# add extensions to our app
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
# user session management
login_manager = LoginManager()
login_manager.init_app(app)
# Oauth2 client setup
client = WebApplicationClient(google_client_id)
from google_oauth import routes
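# Minimal run sketch (illustrative only; assumes config.yaml is in place and
# `routes` registers the view functions; Google OAuth redirect URIs normally
# require HTTPS):
#
#     from google_oauth import app, host, port, debug
#     app.run(host=host, port=port, debug=debug, ssl_context="adhoc")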
| 22.40625 | 78 | 0.76662 | from pathlib import Path
import logging.config
from flask import Flask
from flask_login import LoginManager
from oauthlib.oauth2 import WebApplicationClient
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
import yaml
path_to_config_file = Path(__file__).parent / "config/config.yaml"
logging.config.fileConfig(path_to_config_file, disable_existing_loggers=False)
logger = logging.getLogger("flaskapp")
with open(rf"{path_to_config_file}") as cfgfile:
logger.info(f"loading configuration from config {path_to_config_file}")
config = yaml.load(cfgfile, Loader=yaml.FullLoader)
flask_app_conf = config["FLASK_APP_CONFIGURATION"]
google_conf = config["GOOGLE_CONFIG"]
db_conf = config["DB_CONFIG"]
logger.info(f"=========Flask app Config========\n")
logger.debug(f"{flask_app_conf}")
logger.info(f"=========Google Config========\n")
logger.debug(f"{google_conf}")
logger.info(f"=========db Config========\n")
logger.debug(f"{db_conf}")
secret_key = flask_app_conf["secret_key"]
port = flask_app_conf["port"]
debug = flask_app_conf["debug"]
host = flask_app_conf["host"]
google_discovery_url = google_conf["google_discovery_url"]
google_redirect_uri = google_conf["google_redirect_uri"]
google_client_id = google_conf["google_client_id"]
google_client_secret = google_conf["google_client_secret"]
SQLALCHEMY_DATABASE_URI = db_conf["SQLALCHEMY_DATABASE_URI"]
SQLALCHEMY_TRACK_MODIFICATIONS = db_conf["SQLALCHEMY_TRACK_MODIFICATIONS"]
app = Flask(__name__)
app.config["SECRET_KEY"] = secret_key
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
login_manager = LoginManager()
login_manager.init_app(app)
client = WebApplicationClient(google_client_id)
from google_oauth import routes
| true | true |
1c49fe277718b141c1da5f42a448b8e3c088d5a0 | 3,987 | py | Python | python/basics/chapter_6_dictionaries/exercises_6.py | gabriel-miglioranza/python_crash_course | 57db9d6b17b225a6aaa5451c3a3b567ffc426b37 | [
"MIT"
] | null | null | null | python/basics/chapter_6_dictionaries/exercises_6.py | gabriel-miglioranza/python_crash_course | 57db9d6b17b225a6aaa5451c3a3b567ffc426b37 | [
"MIT"
] | null | null | null | python/basics/chapter_6_dictionaries/exercises_6.py | gabriel-miglioranza/python_crash_course | 57db9d6b17b225a6aaa5451c3a3b567ffc426b37 | [
"MIT"
] | null | null | null | # Chapter 6 exercises from the book Python Crash Course: A Hands-On, Project-Based Introduction to Programming.
# 6-1. Person
person = {
'first_name': 'sean',
'last_name': 'carroll',
'city': 'los angeles'
}
print(person)
# 6-2. Favorite Numbers
favorite_numbers = {
'cris': 23,
'bianca': 133,
'monica': 42
}
print("Monica's favorite number is " + str(favorite_numbers['monica']) + ".")
print("Cris' favorite number is " + str(favorite_numbers['cris']) + ".")
print("Biancas's favorite number is " + str(favorite_numbers['bianca']) + ".")
# 6-3 Glossary
glossary = {
'list': 'set of values organized in a cardinal order.',
'tuple': "set of fixed values organized in a cardinal order.",
'dictionary': 'collection of key-value pairs.',
'string': 'a collection of characters in a certain order.',
}
print('List: ' + glossary['list'])
print('Tuple: ' + glossary['tuple'])
print('Dictionary: ' + glossary['dictionary'])
# 6-4 Glossary 2
for key, value in glossary.items():
print(key.title() + ': ' + value)
# 6-5. Rivers
rivers = {
'nile': 'egypt',
'thames': 'england',
'são francisco': 'brazil'
}
for river, country in rivers.items():
print('The ' + river.title() + ' runs through ' + country.title() + '.')
for river in rivers.keys():
print(river.title())
for country in rivers.values():
print(country.title())
# 6-6. Polling
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python'
}
people_poll = ['jen', 'sarah', 'ned', 'phil', 'james']
for people in people_poll:
if people in favorite_languages.keys():
print(people.title() + ', thanks for your answer.')
else:
print(people.title() + ', you have not taken the poll yet.')
# 6-7. People
person_0 = {
'first_name': 'sean',
'last_name': 'carroll',
'city': 'los angeles'
}
person_1 = {
'first_name': 'hannah',
'last_name': 'fry',
'city': 'london'
}
person_2 = {
'first_name': 'gabriel',
'last_name': 'miglioranza',
'city': 'porto alegre'
}
people = [person_0, person_1, person_2]
for person in people:
full_name = person['first_name'] + ' ' + person['last_name']
city = person['city']
print(full_name.title() + ' lives in ' + city.title() + '.')
# 6-8 Pets
montanha = {
'kind': 'dog',
'owner': 'gabriel'
}
chiara = {
'kind': 'cat',
'owner': 'amanda'
}
guri = {
'kind': 'dog',
'owner': 'ito'
}
pets = [montanha, chiara, guri]
for pet in pets:
for key, value in pet.items():
print(key.title() + ': ' + value.title())
# 6-9. Favorite Places
favorite_places = {
    'anna': ['angel falls', 'antarctica', 'antelope canyon'],
'paul': ['the azores', 'boracay', 'cabo san lucas'],
'miguel': ['grand canyon', 'faroe islands', 'fernando de noronha']
}
for name, places in favorite_places.items():
print(name.title() + "'s favorite places are:")
for place in places:
print('\t' + place.title() + '.')
# 6-10. Favorite Numbers
favorite_numbers = {
'cris': [234, 3434, 343],
'bianca': [215423, 534, 3523],
'monica': [42, 3454, 345]
}
for name, numbers in favorite_numbers.items():
print(name.title() + ': ', numbers)
# 6-11. Cities
cities = {
'porto alegre':{
        'country': 'brazil',
        'population': '1479101',
        'foundation': '1772 AD'
},
'new york city':{
        'country': 'united states of america',
        'population': '8175133',
        'foundation': '1898 AD'
},
'rome':{
        'country': 'italy',
'population': '2872800',
'foundation': '753 BC'
}
}
for city, infos in cities.items():
print('About ' + city.title() + ':')
for key, info in infos.items():
print(key.title() + ': ' + info.title())
# 6-12. Extensions | 24.163636 | 112 | 0.562328 |
person = {
'first_name': 'sean',
'last_name': 'carroll',
'city': 'los angeles'
}
print(person)
favorite_numbers = {
'cris': 23,
'bianca': 133,
'monica': 42
}
print("Monica's favorite number is " + str(favorite_numbers['monica']) + ".")
print("Cris' favorite number is " + str(favorite_numbers['cris']) + ".")
print("Biancas's favorite number is " + str(favorite_numbers['bianca']) + ".")
# 6-3 Glossary
glossary = {
'list': 'set of values organized in a cardinal order.',
'tuple': "set of fixed values organized in a cardinal order.",
'dictionary': 'collection of key-value pairs.',
'string': 'a collection of characters in a certain order.',
}
print('List: ' + glossary['list'])
print('Tuple: ' + glossary['tuple'])
print('Dictionary: ' + glossary['dictionary'])
# 6-4 Glossary 2
for key, value in glossary.items():
print(key.title() + ': ' + value)
# 6-5. Rivers
rivers = {
'nile': 'egypt',
'thames': 'england',
'são francisco': 'brazil'
}
for river, country in rivers.items():
print('The ' + river.title() + ' runs through ' + country.title() + '.')
for river in rivers.keys():
print(river.title())
for country in rivers.values():
print(country.title())
# 6-6. Polling
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python'
}
people_poll = ['jen', 'sarah', 'ned', 'phil', 'james']
for people in people_poll:
if people in favorite_languages.keys():
print(people.title() + ', thanks for your answer.')
else:
print(people.title() + ', you have not taken the poll yet.')
# 6-7. People
person_0 = {
'first_name': 'sean',
'last_name': 'carroll',
'city': 'los angeles'
}
person_1 = {
'first_name': 'hannah',
'last_name': 'fry',
'city': 'london'
}
person_2 = {
'first_name': 'gabriel',
'last_name': 'miglioranza',
'city': 'porto alegre'
}
people = [person_0, person_1, person_2]
for person in people:
full_name = person['first_name'] + ' ' + person['last_name']
city = person['city']
print(full_name.title() + ' lives in ' + city.title() + '.')
# 6-8 Pets
montanha = {
'kind': 'dog',
'owner': 'gabriel'
}
chiara = {
'kind': 'cat',
'owner': 'amanda'
}
guri = {
'kind': 'dog',
'owner': 'ito'
}
pets = [montanha, chiara, guri]
for pet in pets:
for key, value in pet.items():
print(key.title() + ': ' + value.title())
# 6-9. Favorite Places
favorite_places = {
'anna': ['angel falls', 'antartica', 'antelope canion'],
'paul': ['the azores', 'boracay', 'cabo san lucas'],
'miguel': ['grand canyon', 'faroe islands', 'fernando de noronha']
}
for name, places in favorite_places.items():
print(name.title() + "'s favorite places are:")
for place in places:
print('\t' + place.title() + '.')
favorite_numbers = {
'cris': [234, 3434, 343],
'bianca': [215423, 534, 3523],
'monica': [42, 3454, 345]
}
for name, numbers in favorite_numbers.items():
print(name.title() + ': ', numbers)
cities = {
'porto alegre':{
        'country': 'brazil',
        'population': '1479101',
        'foundation': '1772 AD'
},
'new york city':{
        'country': 'united states of america',
        'population': '8175133',
        'foundation': '1898 AD'
},
'rome':{
        'country': 'italy',
'population': '2872800',
'foundation': '753 BC'
}
}
for city, infos in cities.items():
print('About ' + city.title() + ':')
for key, info in infos.items():
print(key.title() + ': ' + info.title())
| true | true |
1c49ffe311feb2c993cc12b7b475ef5d345533e4 | 222,005 | py | Python | SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | 5 | 2021-04-02T14:03:45.000Z | 2022-02-21T12:54:52.000Z | SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | null | null | null | SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | 1 | 2022-01-22T06:27:49.000Z | 2022-01-22T06:27:49.000Z | # This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 Burcak Otlu
import os
import numpy as np
import statsmodels.stats.multitest
# import matplotlib
# BACKEND = 'Agg'
# if matplotlib.get_backend().lower() != BACKEND.lower():
# # If backend is not set properly a call to describe will hang
# matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import gridspec
import pandas as pd
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING
from SigProfilerTopography.source.commons.TopographyCommons import LEADING
from SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLES
from SigProfilerTopography.source.commons.TopographyCommons import TABLES
from SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers
from SigProfilerTopography.source.commons.TopographyCommons import percentage_strings
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT
from SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES
from SigProfilerTopography.source.commons.TopographyCommons import write_excel_file
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT
SIGNATURE = 'signature'
CANCER_TYPE = 'cancer_type'
MUTATION_TYPE = 'mutation_type'
TYPE = 'type'
SIGNIFICANT_STRAND = 'significant_strand'
SIGNIFICANCE_LEVEL = 0.05
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename
transcriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND]
genicVersusIntergenicStrands=[GENIC, INTERGENIC]
replicationStrands = [LAGGING, LEADING]
########################################################################
#New way
#For Mutation Types
def plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(sample,numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
outputDir, jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
    # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
# type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]
#
# type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
# 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
# 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
# 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
########################################################################
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
##################################################################
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['Transcribed_real_count'].values.size>0):
transcribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count>0 and untranscribed_real_count>0):
transcriptionRatiosDict[mutationType] = np.log10(transcribed_real_count/untranscribed_real_count)
##################################################################
##################################################################
lagging_real_count = 0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values.size > 0):
lagging_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values.size > 0):
leading_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values[0]
if (lagging_real_count>0 and leading_real_count>0):
replicationRatiosDict[mutationType] = np.log10(lagging_real_count/leading_real_count)
##################################################################
##################################################################
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType], transcriptionRatiosDict[mutationType], label=mutationType)
##################################################################
########################################################################
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
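# Editorial sketch (not part of the original module): the scatter plots above place each
# mutation type at x = log10(lagging/leading real counts) and y = log10(transcribed/untranscribed
# real counts). The counts below are hypothetical and only illustrate the arithmetic behind a
# point in the upper-left (transcribed- and leading-biased) region of the plot.
def _log10_strand_ratio_sketch():
    transcribed_real_count, untranscribed_real_count = 1200, 800
    lagging_real_count, leading_real_count = 950, 1000
    y = np.log10(transcribed_real_count / untranscribed_real_count)  # ~0.18 -> more transcribed-strand mutations
    x = np.log10(lagging_real_count / leading_real_count)            # ~-0.02 -> slightly more leading-strand mutations
    return x, y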
########################################################################
#Old way
#For Mutation Types
def plot_ncomms11383_Supp_FigG_AllMutationTypes_TranscriptionLog10Ratio_ReplicationLog10Ratio(sample,numberofMutations,type2TranscriptionStrand2CountDict,type2ReplicationStrand2CountDict,outputDir,jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
    # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
# plt.tick_params(
# axis='y', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# left='off' # ticks along the bottom edge are off
# )
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
########################################################################
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
if (mutationType in type2TranscriptionStrand2CountDict) and (mutationType in type2ReplicationStrand2CountDict):
if ((TRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType]) and (UNTRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType])):
transcriptionRatiosDict[mutationType]= np.log10(type2TranscriptionStrand2CountDict[mutationType][TRANSCRIBED_STRAND]/type2TranscriptionStrand2CountDict[mutationType][UNTRANSCRIBED_STRAND])
if ((LAGGING in type2ReplicationStrand2CountDict[mutationType]) and (LEADING in type2ReplicationStrand2CountDict[mutationType])):
replicationRatiosDict[mutationType] = np.log10(type2ReplicationStrand2CountDict[mutationType][LAGGING]/type2ReplicationStrand2CountDict[mutationType][LEADING])
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType],transcriptionRatiosDict[mutationType], label=mutationType)
########################################################################
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#July 7, 2020
def plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes(signatureType,
sample,
numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
    # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
#################################################################################################
#First check whether we have this signature or not
# type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['Transcribed_real_count'].values.size>0):
transcribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count+untranscribed_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature] = np.log10(transcribed_real_count/untranscribed_real_count)
#################################################################################################
#################################################################################################
# First check whether we have this signature or not
# type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
# 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
# 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
# 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
lagging_real_count=0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values.size>0):
lagging_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values.size>0):
leading_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values[0]
if (lagging_real_count+leading_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(lagging_real_count/leading_real_count)
#################################################################################################
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#May 9, 2018 starts
#For Signatures
def plot_ncomms11383_Supp_FigH_AllSignatures_TranscriptionLog10Ratio_ReplicationLog10Ratio(
signatureType,
sample,
numberofMutations,
signature2TranscriptionStrand2CountDict,
signature2ReplicationStrand2CountDict,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
    # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
#################################################################################################
#First check whether we have this signature or not
if ((signature in signature2TranscriptionStrand2CountDict) and (TRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) and
(UNTRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) ):
if ((signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]+signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) >= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature]= np.log10(signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]/signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND])
#################################################################################################
#################################################################################################
# First check whether we have this signature or not
if ((signature in signature2ReplicationStrand2CountDict) and (LAGGING in (signature2ReplicationStrand2CountDict[signature])) and
(LEADING in (signature2ReplicationStrand2CountDict[signature]))):
if ((signature2ReplicationStrand2CountDict[signature][LAGGING]+signature2ReplicationStrand2CountDict[signature][LEADING])>= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(signature2ReplicationStrand2CountDict[signature][LAGGING]/signature2ReplicationStrand2CountDict[signature][LEADING])
#################################################################################################
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (
signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#MutationTypeBased SampleBased Figures
def plot_ncomms11383_Supp_FigE_MutationTypeBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(
type2Sample2TranscriptionStrand2CountDict,
type2Sample2ReplicationStrand2CountDict,
outputDir,
jobname,
isFigureAugmentation):
mutationType2ColorDict = {'C>A': 'blue', 'C>G':'black', 'C>T':'red', 'T>A':'gray', 'T>C':'green', 'T>G':'pink'}
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
#initialization
if mutationType not in transcriptionRatiosDict:
transcriptionRatiosDict[mutationType] = {}
if mutationType not in replicationRatiosDict:
replicationRatiosDict[mutationType] = {}
#Fill the dictionaries
if mutationType in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys()) and (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys())):
transcriptionRatiosDict[mutationType][sample]= np.log10(type2Sample2TranscriptionStrand2CountDict[mutationType][sample][TRANSCRIBED_STRAND]/type2Sample2TranscriptionStrand2CountDict[mutationType][sample][UNTRANSCRIBED_STRAND])
if mutationType in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[mutationType].keys():
if ((LAGGING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys()) and (LEADING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys())):
replicationRatiosDict[mutationType][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[mutationType][sample][LAGGING]/type2Sample2ReplicationStrand2CountDict[mutationType][sample][LEADING])
for mutationType in six_mutation_types:
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(mutationType, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
        # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
if (mutationType in type2Sample2TranscriptionStrand2CountDict):
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((sample in replicationRatiosDict[mutationType].keys()) and (sample in transcriptionRatiosDict[mutationType].keys())):
plt.scatter(replicationRatiosDict[mutationType][sample],transcriptionRatiosDict[mutationType][sample], facecolor='none', color=mutationType2ColorDict[mutationType])
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + mutationType)
newMutationType = mutationType.replace('>', '2')
figureName = newMutationType + '_MutationType_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#SignatureBased SampleBased Figures
#Sig26 is very different
def plot_ncomms11383_Supp_FigF_SignatureBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(type2Sample2TranscriptionStrand2CountDict,type2Sample2ReplicationStrand2CountDict,signatures,outputDir,jobname,isFigureAugmentation):
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signatures:
# initialization
if signature not in transcriptionRatiosDict:
transcriptionRatiosDict[signature] = {}
if signature not in replicationRatiosDict:
replicationRatiosDict[signature] = {}
# Fill the dictionaries
if signature in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]) and (TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]):
transcriptionRatiosDict[signature][sample] = np.log10(type2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND] /type2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])
# print(signature, sample)
# print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND])
# print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])
# print(signature,sample,transcriptionRatiosDict[signature][sample])
if signature in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[signature].keys():
if (LAGGING in type2Sample2ReplicationStrand2CountDict[signature][sample]) and (LEADING in type2Sample2ReplicationStrand2CountDict[signature][sample]):
replicationRatiosDict[signature][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[signature][sample][LAGGING] /type2Sample2ReplicationStrand2CountDict[signature][sample][LEADING])
for signature in signatures:
if (len(replicationRatiosDict[signature].keys())>0 and len(transcriptionRatiosDict[signature].keys())>0):
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(signature, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
            # Leave some extra space via xlim/ylim if necessary
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (sample in replicationRatiosDict[signature]) and (sample in transcriptionRatiosDict[signature]):
plt.scatter(replicationRatiosDict[signature][sample], transcriptionRatiosDict[signature][sample],facecolor='none',color='green')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + signature)
figureName = signature.replace(' ','') + '_Signature_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
def is_there_at_least_10perc_diff(strand1_value, strand2_value):
diff = abs(strand1_value - strand2_value)
if (diff >= (strand1_value/10)) or (diff >= (strand2_value/10)):
return True
else:
return False
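# Editorial sketch (not part of the original module): example behaviour of the 10% check above.
# With 1000 vs. 1080 mutations the absolute difference (80) is below 10% of either strand count,
# so no difference is reported; with 1000 vs. 1150 the difference (150) exceeds the threshold.
def _ten_percent_diff_examples():
    return (is_there_at_least_10perc_diff(1000, 1080),  # False
            is_there_at_least_10perc_diff(1000, 1150))  # True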
# Only this method supports simulations
# key can be a sample or a signature
def plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
key,
isKeySample,
numberofMutations,
N,
x_axis_labels,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
mutationsOrSignatures,
color1,
color2,
figureName,
width,
plot_mode):
    # Here we can take the difference between strand1_values and strand2_values into account when deciding on significance
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# the x locations for the groups
ind = np.arange(N)
fig, ax = plt.subplots(figsize=(16,10),dpi=300)
legend=None
rects1=None
rects2=None
rects3=None
rects4=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind +3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
# add some text for labels, title and axes ticks
if plot_mode==PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL:
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
locs, labels = plt.yticks()
ax.set_ylim(0, locs[-1] + 5000)
        # Keep the bars from becoming too wide when there are only a few categories
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
if key is not None:
ax.set_title('%s %s vs. %s %s' %(key,strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
else:
ax.set_title('%s vs. %s %s' %(strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
# Set x tick labels
if len(x_axis_labels) > 6:
ax.set_xticklabels(x_axis_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_labels, fontsize=35)
# Set the ylabel
plt.ylabel('Number of single base substitutions', fontsize=35, fontweight='normal')
# set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
elif plot_mode == PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT:
# set axis ticks
# ax.tick_params(axis='both', which='both', length=0)
ax.tick_params(axis='x', which='both', length=0)
ax.tick_params(axis='y', which='both', length=0)
        # hide the tick labels
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
if (numberofSimulations > 0):
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 30}, ncol=1, loc='best')
else:
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 35},loc='upper right')
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Add star above the bars for significant differences between the number of mutations on each strand starts
# For each bar: Place a label
if fdr_bh_adjusted_pvalues is not None:
for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
# Get X and Y placement of label from rect.
y_value = max(rect1.get_height(),rect2.get_height())
x_value = rect1.get_x() + rect1.get_width()
# Number of points between bar and label. Change to your liking.
space = 3
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
# Create annotation
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
                    '***',  # three asterisks for q-value <= 0.0001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=20)  # vertical alignment differs for positive and negative values
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
                    '**',  # two asterisks for q-value <= 0.001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=20) # fontsize of the significance asterisks
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
                    '*', # one asterisk: q-value <= SIGNIFICANCE_LEVEL
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=20) # fontsize of the significance asterisks
# Add star above the bars for significant differences between the number of mutations on each strand ends
#########################################################################################################
if (key is None):
figureName = '%s_bar_plot.png' %(figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
elif (not isKeySample):
figureName = '%s_%s_bar_plot.png' %(key,figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
else:
figureName = '%s_%s_%d_bar_plot.png' %(figureName,key,numberofMutations)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
# June 2, 2021
def plot_circle_plot_in_given_axis(ax,
percentage_strings,
sbs_signature,
six_mutation_types,
xticklabels_list,
signature2mutation_type2strand2percentagedict):
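    """
    Draw the strand-bias circle (dot) plot for a single SBS signature on the given axis.

    One column is drawn per (mutation type, percentage string) pair and one row per
    strand-bias comparison (lagging vs. leading, transcribed vs. untranscribed,
    genic vs. intergenic). A filled circle is added whenever the corresponding flag in
    signature2mutation_type2strand2percentagedict equals 1, colored by the favored strand
    (colors match the legend added at the bottom of the axis). Header rectangles and
    background shading use the SigProfilerPlotting mutation-type colors.
    """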
strand_bias_list=[LAGGING_VERSUS_LEADING, TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC]
# make aspect ratio square
ax.set_aspect(1.0)
# set title
title = '%s Strand Bias' %(sbs_signature)
ax.text(len(percentage_strings) * 3, len(strand_bias_list) + 2.5, title, horizontalalignment='center',fontsize=60, fontweight='bold', fontname='Arial')
    # Colors are taken from the SigProfilerPlotting tool for consistency
colors = [[3 / 256, 189 / 256, 239 / 256],
[1 / 256, 1 / 256, 1 / 256],
[228 / 256, 41 / 256, 38 / 256],
[203 / 256, 202 / 256, 202 / 256],
[162 / 256, 207 / 256, 99 / 256],
[236 / 256, 199 / 256, 197 / 256]]
# Put rectangles
x = 0
for i in range(0, len(six_mutation_types), 1):
ax.text((x + (len(percentage_strings) / 2) - 0.75), len(strand_bias_list) + 1.5, six_mutation_types[i],fontsize=55, fontweight='bold', fontname='Arial')
ax.add_patch(plt.Rectangle((x + .0415, len(strand_bias_list) + 0.75), len(percentage_strings) - (2 * .0415), .5,facecolor=colors[i], clip_on=False))
ax.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(strand_bias_list), facecolor=colors[i], zorder=0,alpha=0.25, edgecolor='grey'))
x += len(percentage_strings)
    # Center the x-axis labels: label the minor ticks placed halfway between the major ticks
ax.set_xlim([0, len(six_mutation_types) * len(percentage_strings)])
ax.set_xticklabels([])
ax.tick_params(axis='x', which='minor', length=0, labelsize=35)
# major ticks
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1))
# minor ticks
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1) + 0.5, minor=True)
ax.set_xticklabels(xticklabels_list, minor=True)
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
    ax.tick_params(
        axis='x',       # changes apply to the x-axis
        which='major',  # only the major ticks are affected
        bottom=False,   # ticks along the bottom edge are off
        top=False)      # ticks along the top edge are off
    # Center the y-axis labels: label the minor ticks placed halfway between the major ticks
ax.set_ylim([0, len(strand_bias_list)])
ax.set_yticklabels([])
ax.tick_params(axis='y', which='minor', length=0, labelsize=40)
# major ticks
ax.set_yticks(np.arange(0, len(strand_bias_list), 1))
# minor ticks
ax.set_yticks(np.arange(0, len(strand_bias_list), 1) + 0.5, minor=True)
    ax.set_yticklabels(['', sbs_signature, ''], minor=True)
    ax.tick_params(
        axis='y',       # changes apply to the y-axis
        which='major',  # only the major ticks are affected
        left=False)     # ticks along the left edge are off
# Gridlines based on major ticks
ax.grid(which='major', color='black', zorder=3)
# Put the legend
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan', markersize=40),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray', markersize=40),
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue',markersize=40),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40),
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]
legend = ax.legend(handles=legend_elements, ncol=len(legend_elements), bbox_to_anchor=(0.5, 0), loc='upper center',fontsize=40)
# legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for mutation_type_index, mutation_type in enumerate(six_mutation_types):
# for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):
# strand_bias_list = [TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]
for strand_bias_index, strand_bias in enumerate(strand_bias_list):
if (strand_bias == LAGGING_VERSUS_LEADING):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
lagging_percentage = None
leading_percentage = None
if (LAGGING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LAGGING][percentage_string] == 1):
lagging_percentage = 100
if (LEADING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LEADING][percentage_string] == 1):
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,strand_bias_index + 0.5), radius, color='indianred', fill=True))
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='goldenrod', fill=True))
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging > radius_leading):
# First lagging
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
# Second leading
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
else:
# First leading
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
# Second lagging
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
genic_percentage = None
intergenic_percentage = None
if (GENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
GENIC][percentage_string] == 1):
genic_percentage = 100
if (INTERGENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
INTERGENIC][percentage_string] == 1):
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='cyan',
fill=True))
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='gray',
fill=True))
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic > radius_intergenic):
# First genic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
# Second intergenic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
else:
# First intergenic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
# Second genic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
transcribed_percentage = None
untranscribed_percentage = None
if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
TRANSCRIBED_STRAND][percentage_string] == 1):
transcribed_percentage = 100
if (UNTRANSCRIBED_STRAND in
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
UNTRANSCRIBED_STRAND][percentage_string] == 1):
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='royalblue', fill=True))
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='yellowgreen', fill=True))
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed > radius_untranscribed):
# First transcribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
# Second untranscribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
else:
# First untranscribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
# Second transcribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
# June 2, 2021
def plot_strand_bias_figure_with_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
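    """
    Plot grouped bar plots of real and simulated mutation counts on two strands.

    Real counts for strand1 and strand2 are drawn as solid bars; simulated median counts,
    when provided, are drawn as hatched bars. Bars are annotated with significance asterisks
    (*, **, ***) when the BH-adjusted q-value passes the corresponding threshold and the two
    real counts differ sufficiently (per is_there_at_least_10perc_diff). If axis_given is None,
    a new figure is created and saved under strandbias_figures_outputDir; otherwise the plot is
    drawn on the provided axis.
    """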
    # Here we could also take into account the difference between strand1_values and strand2_values when deciding on significance
# the x locations for the groups
ind = np.arange(N)
    if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend = None
rects3 = None
rects4 = None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind + 3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
# add some text for labels, title and axes ticks
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ymax = np.nanmax([np.nanmax(strand1_values),
np.nanmax(strand2_values),
np.nanmax(strand1_simulations_median_values),
np.nanmax(strand2_simulations_median_values)])
y = ymax / 1.025
    ytick_offset = float(y / 3)
    ylabs = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
    ylabels = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
ylabels = ['{:,}'.format(int(x)) for x in ylabels]
if len(ylabels[-1]) > 3:
ylabels_temp = []
if len(ylabels[-1]) > 7:
for label in ylabels:
if len(label) > 7:
ylabels_temp.append(label[0:-8] + "m")
elif len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
else:
for label in ylabels:
if len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
ylabels = ylabels_temp
ax.set_ylim([0, y])
ax.set_yticks(ylabs)
ax.set_yticklabels(ylabels, fontsize=35, fontweight='bold', fontname='Arial')
# To make the bar width not too wide
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
ax.set_title('%s vs. %s' %(strand1Name,strand2Name), fontsize=40, fontweight='bold')
# Set x tick labels
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
# Set the ylabel
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
# Set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),
(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Add star above the bars for significant differences between the number of mutations on each strand starts
# For each bar: Place a label
if fdr_bh_adjusted_pvalues is not None:
for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
# Get X and Y placement of label from rect.
y_value = max(rect1.get_height(),rect2.get_height())
x_value = rect1.get_x() + rect1.get_width()
# Number of points between bar and label. Change to your liking.
space = 3
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
# Create annotation
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
                    '***', # three asterisks: q-value <= 0.0001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=25) # fontsize of the significance asterisks
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
                    '**', # two asterisks: q-value <= 0.001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=25) # fontsize of the significance asterisks
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)) :
ax.annotate(
                    '*', # one asterisk: q-value <= SIGNIFICANCE_LEVEL
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                    fontsize=25) # fontsize of the significance asterisks
    if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
# June 2, 2021
def plot_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
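    """
    Prepare per-mutation-type inputs and draw the strand-bias bar plot on the given axis.

    For the requested comparison (lagging vs. leading, transcribed vs. untranscribed, or
    genic vs. intergenic), real counts, mean simulated counts and BH-adjusted q-values are
    collected from signature_strand1_versus_strand2_df for the given SBS signature and then
    passed to plot_strand_bias_figure_with_bar_plots with the matching strand colors.
    """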
box = axis.get_position()
axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plot_strand_bias_figure_with_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given = axis)
# June 2, 2021
def plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
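    """
    Plot stacked bar plots of strand ratios together with real-versus-simulated odds ratios.

    Real strand ratios are stacked in one bar and simulated ratios in an adjacent hatched bar.
    For each mutation type the odds ratio (real strand1/strand2 divided by simulated
    strand1/strand2) is written above the stacked bar, with significance asterisks added when
    the BH-adjusted q-value passes the corresponding threshold and the real values differ
    sufficiently (per is_there_at_least_10perc_diff). If axis_given is None, a new figure is
    created and saved under strandbias_figures_outputDir; otherwise the plot is drawn on the
    provided axis.
    """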
# Replace np.nans with 0
strand1_values = [0 if np.isnan(x) else x for x in strand1_values]
strand2_values = [0 if np.isnan(x) else x for x in strand2_values]
strand1_simulations_median_values = [0 if np.isnan(x) else x for x in strand1_simulations_median_values]
strand2_simulations_median_values = [0 if np.isnan(x) else x for x in strand2_simulations_median_values]
# Fill odds_ratio_list
odds_real_list = []
odds_sims_list = []
for a, b in zip(strand1_values, strand2_values):
odds_real = np.nan
if b>0:
odds_real = a/b
odds_real_list.append(odds_real)
for x, y in zip(strand1_simulations_median_values, strand2_simulations_median_values):
odds_sims = np.nan
if y > 0:
odds_sims = x/y
odds_sims_list.append(odds_sims)
odds_ratio_list = [odds_real/odds_sims if odds_sims>0 else np.nan for (odds_real, odds_sims) in zip(odds_real_list,odds_sims_list)]
    # Here we could also take into account the difference between strand1_values and strand2_values when deciding on significance
# the x locations for the groups
ind = np.arange(N)
    if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind, strand2_values, width=width, edgecolor='black', color=color2, bottom=strand1_values)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
ax.bar(ind + width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
ax.bar(ind + width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///', bottom=strand1_simulations_median_values)
# Add some text for labels, title and axes ticks
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ax.set_ylim(0, 1.1)
ax.set_yticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=35)
# To make the bar width not too wide
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
stacked_bar_title = 'Real vs. Simulated\nOdds Ratio of %s vs. %s' %(strand1Name, strand2Name)
ax.set_title(stacked_bar_title, fontsize=40, fontweight='bold')
# Set x tick labels
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
# Set the ylabel
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
# Set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (width/2))
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Add star above the bars for significant differences between the number of mutations on each strand starts
# For each bar: Place a label
if odds_ratio_list is not None:
for odds_ratio, fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(odds_ratio_list, fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
# Get X and Y placement of label from rect.
# y_value = max(rect1.get_height(),rect2.get_height())
y_value = rect1.get_height() + rect2.get_height()
x_value = rect1.get_x() + rect1.get_width()
# Number of points between bar and label. Change to your liking.
space = 3
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
# Create annotation
if not np.isnan(odds_ratio):
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
                        '%.2f ***' %(odds_ratio), # odds ratio with three asterisks: q-value <= 0.0001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                        fontsize=25) # fontsize of the odds ratio annotation
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
                        '%.2f **' %(odds_ratio), # odds ratio with two asterisks: q-value <= 0.001
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                        fontsize=25) # fontsize of the odds ratio annotation
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
                        '%.2f *' %(odds_ratio), # odds ratio with one asterisk: q-value <= SIGNIFICANCE_LEVEL
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                        fontsize=25) # fontsize of the odds ratio annotation
else:
ax.annotate(
                        '%.2f' %(odds_ratio), # odds ratio without asterisks: not significant
(x_value, y_value), # Place label at end of the bar
xytext=(0, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va,
                        fontsize=25) # fontsize of the odds ratio annotation
    if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
# June 2, 2021
def plot_stacked_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
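    """
    Prepare per-mutation-type strand ratios and draw the stacked strand-bias bar plot.

    For the requested comparison, real and simulated counts are converted into strand ratios
    (np.nan when both counts are below NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT),
    BH-adjusted q-values are collected, and everything is passed to
    plot_strand_bias_figure_with_stacked_bar_plots on the given axis.
    """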
box = axis.get_position()
axis.set_position([box.x0, box.y0+0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
if (strand1_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_real_list.append(strand1_real_count/(strand1_real_count+strand2_real_count))
mutationtype_strand2_real_list.append(strand2_real_count/(strand1_real_count+strand2_real_count))
else:
mutationtype_strand1_real_list.append(np.nan)
mutationtype_strand2_real_list.append(np.nan)
if (strand1_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_sims_mean_list.append(strand1_sims_count/(strand1_sims_count+strand2_sims_count))
mutationtype_strand2_sims_mean_list.append(strand2_sims_count/(strand1_sims_count+strand2_sims_count))
else:
mutationtype_strand1_sims_mean_list.append(np.nan)
mutationtype_strand2_sims_mean_list.append(np.nan)
plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given=axis)
def plot_circle_bar_plots_together(outputDir,
jobname,
sbs_signature,
six_mutation_types,
signature2mutation_type2strand2percentagedict,
signature_genic_versus_intergenic_df,
signature_transcribed_versus_untranscribed_df,
signature_lagging_versus_leading_df,
genic_vs_intergenic_strands,
transcription_strands,
replication_strands):
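    """
    Combine the circle plot, three bar plots and three stacked bar plots of one SBS signature
    in a single figure.

    The top rows hold the circle plot, the middle row the genic vs. intergenic, transcribed vs.
    untranscribed and lagging vs. leading bar plots, and the bottom row the corresponding
    stacked bar plots. The figure is saved under
    outputDir/jobname/FIGURE/STRANDBIAS/CIRCLE_BAR_PLOTS.
    """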
x_ticklabels_list = percentage_strings * 6
fig = plt.figure(figsize=(5 + 1.5 * len(x_ticklabels_list), 30 + 1.5))
plt.rc('axes', edgecolor='lightgray')
width = 6
height = 6
width_ratios = [1] * width
height_ratios = [1] * height
gs = gridspec.GridSpec(height, width, height_ratios = height_ratios, width_ratios = width_ratios)
fig.subplots_adjust(hspace=0, wspace=3)
    circle_plot_axis = plt.subplot(gs[0:2, :])
genic_vs_intergenic_bar_plot_axis = plt.subplot(gs[2:4, 0:2])
transcribed_vs_untranscribed_bar_plot_axis = plt.subplot(gs[2:4, 2:4])
lagging_vs_leading_bar_plot_axis = plt.subplot(gs[2:4, 4:6])
genic_vs_intergenic_stacked_bar_plot_axis = plt.subplot(gs[4:, 0:2])
transcribed_vs_untranscribed_stacked_bar_plot_axis = plt.subplot(gs[4:, 2:4])
lagging_vs_leading_stacked_bar_plot_axis = plt.subplot(gs[4:, 4:6])
# Circle plot with legends
    plot_circle_plot_in_given_axis(circle_plot_axis,
percentage_strings,
sbs_signature,
six_mutation_types,
x_ticklabels_list,
signature2mutation_type2strand2percentagedict)
# 3 Bar plots side by side
plot_bar_plot_in_given_axis(genic_vs_intergenic_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Number of Single Base Substitutions')
plot_bar_plot_in_given_axis(transcribed_vs_untranscribed_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_bar_plot_in_given_axis(lagging_vs_leading_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
# 3 Stacked Bar plots side by side
plot_stacked_bar_plot_in_given_axis(genic_vs_intergenic_stacked_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Ratio of mutations on each strand')
plot_stacked_bar_plot_in_given_axis(transcribed_vs_untranscribed_stacked_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_stacked_bar_plot_in_given_axis(lagging_vs_leading_stacked_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
# filename = '%s_circle_bar_plot_together_%s.png' % (sbs_signature, str(significance_level).replace('.', '_'))
filename = '%s_circle_bar_plots.png' % (sbs_signature)
figurepath = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, CIRCLE_BAR_PLOTS, filename)
fig.savefig(figurepath, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
# Key can be signature or sample
def plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
signature_cutoff_numberofmutations_averageprobability_df,
isKeySample,
existingMutationTypesList,
signature_strand1_versus_strand2_df,
width,
strand1_versus_strand2,
strands,
color1,
color2,
title,
figureName,
plot_mode):
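    """
    Plot strand-bias bar plots for every signature in signature_strand1_versus_strand2_df.

    For each signature and each mutation type, real counts, mean simulated counts and
    BH-adjusted q-values of the requested strand1 versus strand2 comparison are gathered from
    the dataframe and passed to plotStrandBiasFigureWithBarPlots, which writes one bar-plot
    figure per signature.
    """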
# signature_strand1_versus_strand2_df column names here
# ['cancer_type', 'signature', 'mutation_type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count',
# 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list']
signatures = signature_strand1_versus_strand2_df['signature'].unique()
x_axis_labels = existingMutationTypesList
N = len(x_axis_labels)
for signature in signatures:
numberofMutations = int(signature_cutoff_numberofmutations_averageprobability_df[signature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['number_of_mutations'].values[0])
mutationtype_strand1_real_list=[]
mutationtype_strand2_real_list=[]
mutationtype_strand1_sims_mean_list=[]
mutationtype_strand2_sims_mean_list=[]
mutationtype_FDR_BH_adjusted_pvalues_list=[]
for mutation_type in existingMutationTypesList:
if (strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED):
strand1_real_count_column_name=TRANSCRIBED_REAL_COUNT
strand1_sims_mean_count_Column_name=TRANSCRIBED_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=UNTRANSCRIBED_REAL_COUNT
strand2_sims_mean_count_Column_name=UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
q_value_column_name = TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
elif (strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC):
strand1_real_count_column_name=GENIC_REAL_COUNT
strand1_sims_mean_count_Column_name=GENIC_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=INTERGENIC_REAL_COUNT
strand2_sims_mean_count_Column_name=INTERGENIC_SIMULATIONS_MEAN_COUNT
q_value_column_name = GENIC_VERSUS_INTERGENIC_Q_VALUE
elif (strand1_versus_strand2 == LAGGING_VERSUS_LEADING):
strand1_real_count_column_name=LAGGING_REAL_COUNT
strand1_sims_mean_count_Column_name=LAGGING_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=LEADING_REAL_COUNT
strand2_sims_mean_count_Column_name=LEADING_SIMULATIONS_MEAN_COUNT
q_value_column_name = LAGGING_VERSUS_LEADING_Q_VALUE
strand1_real_count = 0
strand1_sims_mean_count = 0
strand2_real_count = 0
strand2_sims_mean_count = 0
q_value = None
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values.size>0):
strand1_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values.size>0):
strand1_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values.size>0):
strand2_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values.size>0):
strand2_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values.size>0):
q_value = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_mean_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_mean_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
signature,
isKeySample,
numberofMutations,
N,
x_axis_labels,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
title,
color1,
color2,
figureName,
width,
plot_mode)
###################################################################
# April 20, 2020
# July 4, 2020 starts
# Using dataframes
def transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode):
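    """
    Main driver for the transcription and replication strand-bias figures.

    Creates the strand-bias output directories, reads the signature tables (discreet mode or
    probability mode, depending on is_discreet), reads the per-signature and per-type strand
    count tables for each comparison in strand_bias_list, computes BH-adjusted q-values
    (statsmodels multipletests, fdr_bh) over all collected p-values, and writes the q-value
    tables under FIGURE/STRANDBIAS/TABLES. The resulting dataframes are then used downstream
    to generate the strand-bias figures.
    """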
    # Initialize these dataframes as empty dataframes
    # They will be filled by reading the corresponding files if the data exist
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
sbs_df = pd.DataFrame()
dbs_df = pd.DataFrame()
id_df = pd.DataFrame()
subsSignatures = np.array([])
dinucsSignatures = np.array([])
indelsSignatures = np.array([])
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,BAR_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_BAR_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,TABLES), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,EXCEL_FILES), exist_ok=True)
strandbias_figures_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS)
strandbias_figures_tables_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, TABLES)
strandbias_figures_excel_files_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, EXCEL_FILES)
##########################################################################################
######################### Read dictionaries related with ################################
######################### signatures and samples starts ################################
##########################################################################################
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
subsSignatures = subsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
if (DBS in mutation_types_contexts):
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
dinucsSignatures = dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
if (ID in mutation_types_contexts):
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
indelsSignatures = indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
##########################################################################################
######################### Read dictionaries related with ################################
######################### signatures and samples ends ##################################
##########################################################################################
if is_discreet:
sbs_df = subsSignature_cutoff_numberofmutations_averageprobability_df
dbs_df = dinucsSignature_cutoff_numberofmutations_averageprobability_df
id_df = indelsSignature_cutoff_numberofmutations_averageprobability_df
else:
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
sbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
subsSignatures = sbs_df['signature'].unique()
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
dbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
dinucsSignatures = dbs_df['signature'].unique()
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
id_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
indelsSignatures = id_df['signature'].unique()
#######################################################################
# Step1 Read p_value
if LAGGING_VERSUS_LEADING in strand_bias_list:
# Replication Strand Bias
signature_mutation_type_lagging_versus_leading_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_mutation_type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,signature_mutation_type_lagging_versus_leading_table_file_name)
signature_lagging_versus_leading_df = pd.read_csv(signature_mutation_type_lagging_versus_leading_table_filepath, header=0, sep='\t')
type_lagging_versus_leading_table_file_name = 'Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)
type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,type_lagging_versus_leading_table_file_name)
type_lagging_versus_leading_df = pd.read_csv(type_lagging_versus_leading_table_filepath, header=0, sep='\t')
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
# Transcription Strand Bias
signature_mutation_type_transcribed_versus_untranscribed_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_mutation_type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_transcribed_versus_untranscribed_table_file_name)
signature_transcribed_versus_untranscribed_df = pd.read_csv(signature_mutation_type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t')
type_transcribed_versus_untranscribed_table_file_name = 'Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_transcribed_versus_untranscribed_table_file_name)
type_transcribed_versus_untranscribed_df = pd.read_csv(type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t')
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
# Transcription Strand Bias
signature_mutation_type_genic_versus_intergenic_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_mutation_type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_genic_versus_intergenic_table_file_name)
signature_genic_versus_intergenic_df = pd.read_csv(signature_mutation_type_genic_versus_intergenic_table_filepath, header=0, sep='\t')
type_genic_versus_intergenic_table_file_name = 'Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_genic_versus_intergenic_table_file_name)
type_genic_versus_intergenic_df = pd.read_csv(type_genic_versus_intergenic_table_filepath, header=0, sep='\t')
#######################################################################
#######################################################################
# Step2 Compute q_value
p_values_list=[]
element_names=[]
# Fill p_values_list
if LAGGING_VERSUS_LEADING in strand_bias_list:
for index, row in signature_lagging_versus_leading_df.iterrows():
element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], LAGGING_VERSUS_LEADING)
element_names.append(element_name)
p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])
for index, row in type_lagging_versus_leading_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], LAGGING_VERSUS_LEADING)
element_names.append(element_name)
p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
for index, row in signature_transcribed_versus_untranscribed_df.iterrows():
element_name=(row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)
element_names.append(element_name)
p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])
for index, row in type_transcribed_versus_untranscribed_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)
element_names.append(element_name)
p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
for index, row in signature_genic_versus_intergenic_df.iterrows():
element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], GENIC_VERSUS_INTERGENIC)
element_names.append(element_name)
p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])
for index, row in type_genic_versus_intergenic_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], GENIC_VERSUS_INTERGENIC)
element_names.append(element_name)
p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])
# print('len(p_values_list): %d' %(len(p_values_list)))
#######################################################################
#######################################################################
if ((p_values_list is not None) and p_values_list):
rejected, all_FDR_BH_adjusted_p_values, alphacSidak, alphacBonf = statsmodels.stats.multitest.multipletests(p_values_list, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)
        # Initialize the q-value columns with np.nan
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan
type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = np.nan
type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]= np.nan
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan
type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan
# Update q_value
for element_index, element_name in enumerate(element_names,0):
(cancer_type, signature, mutation_type, versus_type)=element_name
q_value=all_FDR_BH_adjusted_p_values[element_index]
if (signature is not None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
signature_transcribed_versus_untranscribed_df.loc[(signature_transcribed_versus_untranscribed_df[CANCER_TYPE]==cancer_type) &
(signature_transcribed_versus_untranscribed_df[SIGNATURE]==signature) &
(signature_transcribed_versus_untranscribed_df[MUTATION_TYPE]==mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]=q_value
elif (signature is not None) and (versus_type == GENIC_VERSUS_INTERGENIC):
signature_genic_versus_intergenic_df.loc[(signature_genic_versus_intergenic_df[CANCER_TYPE]==cancer_type) &
(signature_genic_versus_intergenic_df[SIGNATURE]==signature) &
(signature_genic_versus_intergenic_df[MUTATION_TYPE]==mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE]=q_value
elif (signature is not None) and (versus_type==LAGGING_VERSUS_LEADING):
signature_lagging_versus_leading_df.loc[(signature_lagging_versus_leading_df[CANCER_TYPE]==cancer_type) &
(signature_lagging_versus_leading_df[SIGNATURE]==signature) &
(signature_lagging_versus_leading_df[MUTATION_TYPE]==mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE]=q_value
elif (signature is None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
type_transcribed_versus_untranscribed_df.loc[(type_transcribed_versus_untranscribed_df[CANCER_TYPE] == cancer_type) & (type_transcribed_versus_untranscribed_df[TYPE] == mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = q_value
elif (signature is None) and (versus_type == GENIC_VERSUS_INTERGENIC):
type_genic_versus_intergenic_df.loc[(type_genic_versus_intergenic_df[CANCER_TYPE] == cancer_type) & (type_genic_versus_intergenic_df[TYPE] == mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE] = q_value
elif (signature is None) and (versus_type == LAGGING_VERSUS_LEADING):
type_lagging_versus_leading_df.loc[(type_lagging_versus_leading_df[CANCER_TYPE] == cancer_type) & (type_lagging_versus_leading_df[TYPE] == mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE] = q_value
# Reorder columns
# Write dataframes
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_df = signature_lagging_versus_leading_df[
['cancer_type', 'signature', 'mutation_type',
'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count',
'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count',
'Lagging_max_sims_count', 'Lagging_sims_count_list',
'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count',
'Leading_max_sims_count', 'Leading_sims_count_list']]
type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_lagging_versus_leading_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_lagging_versus_leading_df.to_csv(type_filepath, sep='\t', header=True, index=False)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_df=signature_transcribed_versus_untranscribed_df[['cancer_type', 'signature', 'mutation_type',
'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',
'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',
'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]
type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',
'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',
'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_transcribed_versus_untranscribed_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_transcribed_versus_untranscribed_df.to_csv(type_filepath, sep='\t', header=True, index=False)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_df=signature_genic_versus_intergenic_df[['cancer_type', 'signature', 'mutation_type',
'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',
'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]
type_genic_versus_intergenic_df=type_genic_versus_intergenic_df[['cancer_type', 'type',
'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',
'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_genic_versus_intergenic_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_genic_versus_intergenic_df.to_csv(type_filepath, sep='\t', header=True, index=False)
#######################################################################
#######################################################################
# Step3 Filter q-values, decide the significant strand, and set the 10, 20, 30, 50, 75, 100 percent difference flags
# Write the filtered q-value dataframes with the percentage columns
##################################################################################################################################
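# For each enabled comparison: keep only rows with q-value <= SIGNIFICANCE_LEVEL, mark the strand with the higher
# real count as the significant strand, and set a 1 under each percentage column whose threshold is met by the
# real-count difference between the two strands.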
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_filtered_q_value_df = signature_lagging_versus_leading_df[signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_lagging_versus_leading_filtered_q_value_df= type_lagging_versus_leading_df[type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
signature_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None
for percentage_string in percentage_strings:
signature_lagging_versus_leading_filtered_q_value_df[percentage_string] = None
type_lagging_versus_leading_filtered_q_value_df[percentage_string] = None
signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND] = LAGGING
signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]), SIGNIFICANT_STRAND] = LEADING
type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND]=LAGGING
type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]),SIGNIFICANT_STRAND]=LEADING
for percentage_index, percentage_number in enumerate(percentage_numbers, 0):
percentage_string = percentage_strings[percentage_index]
# Set percentages for signature mutation_type
signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
# Set percentages for type
type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_lagging_versus_leading_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_lagging_versus_leading_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False)
##################################################################################################################################
##################################################################################################################################
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_filtered_q_value_df = signature_transcribed_versus_untranscribed_df[signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_transcribed_versus_untranscribed_filtered_q_value_df= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy()
signature_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND]=None
for percentage_string in percentage_strings:
signature_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string]=None
type_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string] = None
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND
for percentage_index, percentage_number in enumerate(percentage_numbers,0):
percentage_string=percentage_strings[percentage_index]
# Set percentages for signature mutation_type
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
# Set percentages for type
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True,index=False)
##################################################################################################################################
##################################################################################################################################
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_filtered_q_value_df = signature_genic_versus_intergenic_df[signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_genic_versus_intergenic_filtered_q_value_df= type_genic_versus_intergenic_df[type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy()
signature_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None
for percentage_string in percentage_strings:
signature_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None
type_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None
signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC
signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]),SIGNIFICANT_STRAND] = INTERGENIC
type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC
type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = INTERGENIC
# Set percentages
for percentage_index, percentage_number in enumerate(percentage_numbers,0):
percentage_string=percentage_strings[percentage_index]
# Set percentages for signature mutation_type
signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
# Set percentages for type
type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_genic_versus_intergenic_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_genic_versus_intergenic_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False)
##################################################################################################################################
#######################################################################
# Write Excel Files
sheet_list = ['corrected_p_value', 'percentages']
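# Each Excel workbook gets two sheets: the full q-value table and the filtered table carrying the percentage flags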
for strand1_versus_strand2 in strand_bias_list:
if strand1_versus_strand2==LAGGING_VERSUS_LEADING:
signatures_df_list=[signature_lagging_versus_leading_df,signature_lagging_versus_leading_filtered_q_value_df]
types_df_list = [type_lagging_versus_leading_df, type_lagging_versus_leading_filtered_q_value_df]
elif strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
signatures_df_list = [signature_transcribed_versus_untranscribed_df,signature_transcribed_versus_untranscribed_filtered_q_value_df]
types_df_list = [type_transcribed_versus_untranscribed_df, type_transcribed_versus_untranscribed_filtered_q_value_df]
elif strand1_versus_strand2==GENIC_VERSUS_INTERGENIC:
signatures_df_list = [signature_genic_versus_intergenic_df,signature_genic_versus_intergenic_filtered_q_value_df]
types_df_list = [type_genic_versus_intergenic_df, type_genic_versus_intergenic_filtered_q_value_df]
signatures_filename="Signatures_Mutation_Types_%s.xlsx" %(strand1_versus_strand2)
file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, signatures_filename)
write_excel_file(signatures_df_list, sheet_list, file_name_with_path)
types_filename="Types_%s.xlsx" %(strand1_versus_strand2)
file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, types_filename)
write_excel_file(types_df_list, sheet_list, file_name_with_path)
#######################################################################
#######################################################################
#Circle plots starts
#######################################################################
#Step4 Fill this dictionary
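# Nested structure: signature -> mutation_type -> significant strand -> {percentage_string: 0 or 1}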
signature2mutation_type2strand2percentagedict={}
df_list=[]
if LAGGING_VERSUS_LEADING in strand_bias_list:
df_list.append(signature_lagging_versus_leading_filtered_q_value_df)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
df_list.append(signature_transcribed_versus_untranscribed_filtered_q_value_df)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
df_list.append(signature_genic_versus_intergenic_filtered_q_value_df)
for df in df_list:
for index, row in df.iterrows():
cancer_type = row[CANCER_TYPE]
signature = row[SIGNATURE]
mutation_type = row[MUTATION_TYPE]
significant_strand=row[SIGNIFICANT_STRAND]
percent_10 = row[AT_LEAST_10_PERCENT_DIFF]
percent_20 = row[AT_LEAST_20_PERCENT_DIFF]
percent_30 = row[AT_LEAST_30_PERCENT_DIFF]
percent_50 = row[AT_LEAST_50_PERCENT_DIFF]
percent_75 = row[AT_LEAST_75_PERCENT_DIFF]
percent_100 = row[AT_LEAST_100_PERCENT_DIFF]
if signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[signature]:
if significant_strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand]={}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
else:
signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
else:
signature2mutation_type2strand2percentagedict[signature] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
#######################################################################
#######################################################################
# Step4 Fill this dictionary
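# Nested structure: type -> significant strand -> {percentage_string: 0 or 1}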
type2strand2percentagedict={}
df_list=[]
if LAGGING_VERSUS_LEADING in strand_bias_list:
df_list.append(type_lagging_versus_leading_filtered_q_value_df)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
df_list.append(type_transcribed_versus_untranscribed_filtered_q_value_df)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
df_list.append(type_genic_versus_intergenic_filtered_q_value_df)
for df in df_list:
for index, row in df.iterrows():
cancer_type = row[CANCER_TYPE]
my_type = row[TYPE]
significant_strand=row[SIGNIFICANT_STRAND]
percent_10 = row[AT_LEAST_10_PERCENT_DIFF]
percent_20 = row[AT_LEAST_20_PERCENT_DIFF]
percent_30 = row[AT_LEAST_30_PERCENT_DIFF]
percent_50 = row[AT_LEAST_50_PERCENT_DIFF]
percent_75 = row[AT_LEAST_75_PERCENT_DIFF]
percent_100 = row[AT_LEAST_100_PERCENT_DIFF]
if my_type in type2strand2percentagedict:
if significant_strand in type2strand2percentagedict[my_type]:
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
type2strand2percentagedict[my_type][significant_strand]={}
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
type2strand2percentagedict[my_type] = {}
type2strand2percentagedict[my_type][significant_strand] = {}
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
#######################################################################
#######################################################################
# Step5 Plot figures
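# One circle figure per strand-bias comparison: SBS signatures are laid out across the six mutation types,
# DBS and ID signatures across the percentage thresholds only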
plot_legend(strandbias_figures_outputDir)
for strand_bias in strand_bias_list:
if np.any(subsSignatures):
plot_six_mutations_sbs_signatures_circle_figures(subsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
signature2mutation_type2strand2percentagedict,
percentage_strings)
if np.any(dinucsSignatures):
plot_dbs_and_id_signatures_circle_figures(DBS,
dinucsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings)
if np.any(indelsSignatures):
plot_dbs_and_id_signatures_circle_figures(ID,
indelsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings)
# Circle plots ends
#######################################################################
########################################################################
########################## Part 2 starts ##############################
############## Mutation Types Scatter Plots starts #####################
############## Signatures Scatter Plots starts #########################
########################################################################
if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty)):
plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(None,None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not sbs_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('subs', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
sbs_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not dbs_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('dinucs', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
dbs_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not id_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('indels', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
id_df,
outputDir, jobname)
########################################################################
############## Mutation Types Scatter Plots ends #######################
############## Signatures Scatter Plots ends ###########################
########################## Part 2 ends ################################
########################################################################
########################################################################
########################## Part 4 starts ##############################
######## Bar plot starts includes sample based bar plots ###############
########################################################################
isKeySample = False
width = 0.20
#######################################################
################# Plot types starts ###################
#######################################################
types_list= [('All Mutations', 'mutationtypes', six_mutation_types),
('All Signatures', 'subs_signatures', subsSignatures),
('All Signatures', 'indels_signatures', indelsSignatures),
('All Signatures', 'dinucs_signatures', dinucsSignatures)]
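# For each category above and each enabled strand-bias comparison, collect per-type real counts, mean simulated
# counts and BH-adjusted q-values, then draw grouped bar plots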
for mutationsOrSignatures, sub_figure_name, x_axis_labels in types_list:
x_axis_labels = sorted(x_axis_labels, key=natural_key)
N = len(x_axis_labels)
for strand_bias in strand_bias_list:
if (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
type_strand1_versus_strand2_df = type_transcribed_versus_untranscribed_df
strand1 = transcriptionStrands[0]
strand2 = transcriptionStrands[1]
strand1_real_count_column_name = 'Transcribed_real_count'
strand2_real_count_column_name = 'UnTranscribed_real_count'
strand1_sims_mean_count_column_name = 'Transcribed_mean_sims_count'
strand2_sims_mean_count_column_name = 'UnTranscribed_mean_sims_count'
q_value_column_name = 'transcribed_versus_untranscribed_q_value'
color1 = 'royalblue'
color2 = 'yellowgreen'
figureName = '%s_transcription_strand_bias' %(sub_figure_name)
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
type_strand1_versus_strand2_df = type_genic_versus_intergenic_df
strand1 = genicVersusIntergenicStrands[0]
strand2 = genicVersusIntergenicStrands[1]
strand1_real_count_column_name = 'genic_real_count'
strand2_real_count_column_name = 'intergenic_real_count'
strand1_sims_mean_count_column_name = 'genic_mean_sims_count'
strand2_sims_mean_count_column_name = 'intergenic_mean_sims_count'
q_value_column_name = 'genic_versus_intergenic_q_value'
color1 = 'cyan'
color2 = 'gray'
figureName = '%s_genic_versus_intergenic_strand_bias' %(sub_figure_name)
elif (strand_bias == LAGGING_VERSUS_LEADING):
type_strand1_versus_strand2_df = type_lagging_versus_leading_df
strand1 = replicationStrands[0]
strand2 = replicationStrands[1]
strand1_real_count_column_name = 'Lagging_real_count'
strand2_real_count_column_name = 'Leading_real_count'
strand1_sims_mean_count_column_name = 'Lagging_mean_sims_count'
strand2_sims_mean_count_column_name = 'Leading_mean_sims_count'
q_value_column_name = 'lagging_versus_leading_q_value'
color1 = 'indianred'
color2 = 'goldenrod'
figureName = '%s_replication_strand_bias' %(sub_figure_name)
types_strand1_real_count_list = []
types_strand2_real_count_list = []
types_strand1_sims_mean_count_list = []
types_strand2_sims_mean_count_list = []
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues = []
for my_type in x_axis_labels:
strand1_real_count = 0
strand2_real_count = 0
strand1_sims_mean_count = 0
strand2_sims_mean_count = 0
q_value = None
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values.size>0:
strand1_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values.size>0:
strand2_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values.size>0:
strand1_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values.size>0:
strand2_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values.size>0:
q_value= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values[0]
types_strand1_real_count_list.append(strand1_real_count)
types_strand2_real_count_list.append(strand2_real_count)
types_strand1_sims_mean_count_list.append(strand1_sims_mean_count)
types_strand2_sims_mean_count_list.append(strand2_sims_mean_count)
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues.append(q_value)
if ((len(x_axis_labels) > 0) and types_strand1_real_count_list and types_strand2_real_count_list and types_strand1_sims_mean_count_list and types_strand2_sims_mean_count_list and (len(types_strand1_versus_strand2_FDR_BH_adjusted_pvalues)>0)):
if (types_strand1_real_count_list and types_strand2_real_count_list):
plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
None,
isKeySample,
None,
N,
x_axis_labels,
types_strand1_real_count_list,
types_strand2_real_count_list,
types_strand1_sims_mean_count_list,
types_strand2_sims_mean_count_list,
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues,
strand1,strand2,
mutationsOrSignatures,
color1, color2,
figureName,
width,
plot_mode)
#######################################################
################# Plot types ends #####################
#######################################################
#################################################################
########### Plot sub signatures mutation types starts ###########
#################################################################
if not sbs_df.empty:
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_transcribed_versus_untranscribed_df,
width,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcriptionStrands,
'royalblue',
'yellowgreen',
'All Mutations',
'mutationtypes_transcription_strand_bias',
plot_mode)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_genic_versus_intergenic_df,
width,
GENIC_VERSUS_INTERGENIC,
genicVersusIntergenicStrands,
'cyan',
'gray',
'All Mutations',
'mutationtypes_genic_versus_intergenic_strand_bias',
plot_mode)
if LAGGING_VERSUS_LEADING in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_lagging_versus_leading_df,
width,
LAGGING_VERSUS_LEADING,
replicationStrands,
'indianred',
'goldenrod',
'All Mutations',
'mutationtypes_replication_strand_bias',
plot_mode)
#################################################################
########### Plot sub signatures mutation types ends #############
#################################################################
########################################################################
######## Bar plot ends includes sample based bar plots #################
########################## Part 4 ends ################################
########################################################################
# Circle Bar Plots
# Plot circle plots and bar plots all together
# At the top ax, circle plots with 3 rows: genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading
# At the middle ax, 3 bar plots: genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading
# At the bottom ax, 3 normalized bar plots: genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading
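# Only SBS signatures are drawn here, and both the transcription and replication comparisons must be enabled in strand_bias_list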
if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):
sbs_signatures = sbs_df['signature'].unique()
for sbs_signature in sbs_signatures:
plot_circle_bar_plots_together(outputDir,
jobname,
sbs_signature,
six_mutation_types,
signature2mutation_type2strand2percentagedict,
signature_genic_versus_intergenic_df,
signature_transcribed_versus_untranscribed_df,
signature_lagging_versus_leading_df,
genicVersusIntergenicStrands,
transcriptionStrands,
replicationStrands)
###################################################################
############################################################################################################################
def plot_dbs_and_id_signatures_circle_figures(signature_type,
signatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings):
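"""Draw a circle-plot grid for DBS or ID signatures: one row per signature with a significant strand asymmetry,
one column per percentage threshold, each cell filled with a circle colored by the favored strand."""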
rows_signatures=[]
#####################################################################
if strand_bias==LAGGING_VERSUS_LEADING:
strands=replicationStrands
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands=transcriptionStrands
elif strand_bias==GENIC_VERSUS_INTERGENIC:
strands=genicVersusIntergenicStrands
#####################################################################
#####################################################################
# Fill rows_signatures with the DBS or ID signatures (whichever was passed in) that have at least one significant strand entry
for signature in signatures:
if signature in type2strand2percentagedict:
for strand in strands:
if strand in type2strand2percentagedict[signature]:
for percentage_string in percentage_strings:
if percentage_string in type2strand2percentagedict[signature][strand]:
print('signature:%s strand:%s percentage_string:%s' %(signature,strand,percentage_string))
if signature not in rows_signatures:
rows_signatures.append(signature)
#####################################################################
#####################################################################
rows_signatures=sorted(rows_signatures,key=natural_key,reverse=True)
#####################################################################
if (len(rows_signatures)>0):
#####################################################################
#New plot (width,height)
fig, ax = plt.subplots(figsize=(5+1.5*len(percentage_strings), 10+1.5*len(rows_signatures)))
#make aspect ratio square
ax.set_aspect(1.0)
#####################################################################
######################################################################################################################################
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for row_signature_index, row_signature in enumerate(rows_signatures):
if (strand_bias==LAGGING_VERSUS_LEADING):
if row_signature in type2strand2percentagedict:
lagging_percentage=None
leading_percentage=None
if LAGGING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LAGGING][percentage_string]==1:
lagging_percentage = 100
if LEADING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LEADING][percentage_string]==1:
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='indianred', fill=True)
ax.add_artist(circle)
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='goldenrod', fill=True)
ax.add_artist(circle)
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging>radius_leading):
#First lagging
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='indianred', fill=True)
ax.add_artist(circle)
#Second leading
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)
ax.add_artist(circle)
else:
#First leading
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)
ax.add_artist(circle)
#Second lagging
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='indianred', fill=True)
ax.add_artist(circle)
elif (strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if row_signature in type2strand2percentagedict:
transcribed_percentage=None
untranscribed_percentage=None
if TRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][TRANSCRIBED_STRAND][percentage_string]==1:
transcribed_percentage = 100
if UNTRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][UNTRANSCRIBED_STRAND][percentage_string]==1:
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='royalblue', fill=True)
ax.add_artist(circle)
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='yellowgreen', fill=True)
ax.add_artist(circle)
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed>radius_untranscribed):
#First transcribed
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)
ax.add_artist(circle)
#Second untranscribed
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)
ax.add_artist(circle)
else:
#First untranscribed
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)
ax.add_artist(circle)
#Second transcribed
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)
ax.add_artist(circle)
elif (strand_bias==GENIC_VERSUS_INTERGENIC):
if row_signature in type2strand2percentagedict:
genic_percentage=None
intergenic_percentage=None
if GENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][GENIC][percentage_string]==1:
genic_percentage = 100
if INTERGENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][INTERGENIC][percentage_string]==1:
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='cyan', fill=True)
ax.add_artist(circle)
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='gray', fill=True)
ax.add_artist(circle)
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic>radius_intergenic):
#First genic
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)
ax.add_artist(circle)
#Second intergenic
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)
ax.add_artist(circle)
else:
#First intergenic
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)
ax.add_artist(circle)
#Second genic
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)
ax.add_artist(circle)
######################################################################################################################################
##################################################################################
# Center the x-axis labels by placing them on minor ticks offset by 0.5
ax.set_xlim([0,len(percentage_strings)])
ax.set_xticklabels([])
ax.tick_params(axis='x', which='minor', length=0, labelsize=20)
#major ticks
ax.set_xticks(np.arange(0, len(percentage_strings), 1))
#minor ticks
ax.set_xticks(np.arange(0, len(percentage_strings), 1)+0.5,minor=True)
ax.set_xticklabels(percentage_strings,minor=True)
#Jul 7, 2020
if strand_bias==LAGGING_VERSUS_LEADING:
fig.suptitle('Lagging versus Leading Strand Bias', fontsize=30)
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
fig.suptitle('Transcribed versus Untranscribed Strand Bias', fontsize=30)
elif strand_bias==GENIC_VERSUS_INTERGENIC:
fig.suptitle('Genic versus Intergenic Strand Bias', fontsize=30)
ax.xaxis.set_ticks_position('top')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='major', # only major ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False) # ticks along the top edge are off
##################################################################################
##################################################################################
# Center the y-axis labels by placing them on minor ticks offset by 0.5
ax.set_ylim([0,len(rows_signatures)])
ax.set_yticklabels([])
ax.tick_params(axis='y', which='minor', length=0, labelsize=30)
#major ticks
ax.set_yticks(np.arange(0, len(rows_signatures), 1))
#minor ticks
ax.set_yticks(np.arange(0, len(rows_signatures), 1)+0.5,minor=True)
ax.set_yticklabels(rows_signatures, minor=True) # fontsize
plt.tick_params(
axis='y', # changes apply to the y-axis
which='major', # only major ticks are affected
left=False) # ticks along the left edge are off
##################################################################################
##################################################################################
# Gridlines based on major ticks
ax.grid(which='major', color='black')
##################################################################################
##################################################################################
# Save the figure under the circle plots directory
filename = '%s_Signatures_%s_with_circles_%s.png' % (signature_type,strand_bias,str(SIGNIFICANCE_LEVEL).replace('.','_'))
figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)
fig.tight_layout()
fig.savefig(figFile)
plt.cla()
plt.close(fig)
##################################################################################
############################################################################################################################
############################################################################################################################
#Plot Legend only
def plot_legend(strandbias_figures_outputDir):
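"""Write a standalone legend figure (two colored markers, one per strand) for each strand-bias comparison."""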
strand_biases=[TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]
for strandbias in strand_biases:
##################################################################################
fig = plt.figure(figsize=(4,1), dpi=300)
ax = plt.gca()
plt.axis('off')
##################################################################################
##################################################################################
if strandbias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=20),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=20)]
elif strandbias == GENIC_VERSUS_INTERGENIC:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=20),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=20)]
elif (strandbias==LAGGING_VERSUS_LEADING):
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=20),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=20)]
ax.legend(handles=legend_elements, bbox_to_anchor=(0, 0.5), loc='center left' ,fontsize = 20)
##################################################################################
##################################################################################
# Save the legend figure under the circle plots directory
filename = 'Legend_%s.png' % (strandbias)
figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)
fig.tight_layout()
fig.savefig(figFile)
plt.cla()
plt.close(fig)
##################################################################################
############################################################################################################################
############################################################################################################################
#Sep 19, 2020
def plot_six_mutations_sbs_signatures_circle_figures(sbs_signatures,
strand_bias,
strandbias_figures_outputDir,
significance_level,
signature2mutation_type2strand2percentagedict,
percentage_strings):
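"""Draw a circle-plot grid for SBS signatures: one row per signature with a significant strand asymmetry,
one column per (mutation type, percentage threshold) pair, each cell filled with a circle colored by the favored strand."""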
mutation_types=six_mutation_types
#####################################################################
if strand_bias==LAGGING_VERSUS_LEADING:
strands=replicationStrands
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands=transcriptionStrands
elif strand_bias==GENIC_VERSUS_INTERGENIC:
strands=genicVersusIntergenicStrands
#####################################################################
#####################################################################
rows_sbs_signatures=[]
#Fill rows_sbs_signatures
for signature in sbs_signatures:
if signature in signature2mutation_type2strand2percentagedict:
for mutation_type in signature2mutation_type2strand2percentagedict[signature]:
for strand in strands:
if strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:
for percentage_string in percentage_strings:
if (percentage_string in signature2mutation_type2strand2percentagedict[signature][mutation_type][strand]) and (signature2mutation_type2strand2percentagedict[signature][mutation_type][strand][percentage_string]==1):
if signature not in rows_sbs_signatures:
rows_sbs_signatures.append(signature)
#####################################################################
#####################################################################
rows_sbs_signatures=sorted(rows_sbs_signatures,key=natural_key,reverse=True)
#####################################################################
#####################################################################
xticklabels_list = percentage_strings * len(mutation_types)
#####################################################################
if (len(rows_sbs_signatures)>0):
#####################################################################
plot1, panel1 = plt.subplots(figsize=(5+1.5*len(xticklabels_list), 10+1.5*len(rows_sbs_signatures)))
        # A smaller figure, e.g. figsize=(5+1.4*len(xticklabels_list), 10+len(rows_sbs_signatures)), clips the title and mutation-type texts.
plt.rc('axes', edgecolor='lightgray')
# panel1 = plt.axes([0.04, 0.09, 0.95, 0.75])
#make aspect ratio square
panel1.set_aspect(1.0)
#####################################################################
##################################################################################
#set title
if strand_bias==LAGGING_VERSUS_LEADING:
title='Lagging versus Leading Strand Bias'
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
title='Transcribed versus Untranscribed Strand Bias'
elif strand_bias==GENIC_VERSUS_INTERGENIC:
title='Genic versus Intergenic Strand Bias'
panel1.text(len(percentage_strings)*3, len(rows_sbs_signatures)+2.5, title, horizontalalignment='center', fontsize=60, fontweight='bold', fontname='Arial')
##################################################################################
##################################################################################
        # Colors taken from the SigProfilerPlotting tool for consistency
colors = [[3 / 256, 189 / 256, 239 / 256],
[1 / 256, 1 / 256, 1 / 256],
[228 / 256, 41 / 256, 38 / 256],
[203 / 256, 202 / 256, 202 / 256],
[162 / 256, 207 / 256, 99 / 256],
[236 / 256, 199 / 256, 197 / 256]]
#Put rectangles
x = 0
for i in range(0, len(mutation_types), 1):
panel1.text((x+(len(percentage_strings)/2)-0.75), len(rows_sbs_signatures)+1.5, mutation_types[i], fontsize=55, fontweight='bold', fontname='Arial')
panel1.add_patch(plt.Rectangle((x+.0415, len(rows_sbs_signatures)+0.75), len(percentage_strings)-(2*.0415), .5, facecolor=colors[i], clip_on=False))
panel1.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(rows_sbs_signatures), facecolor=colors[i], zorder=0, alpha=0.25,edgecolor='grey'))
x += len(percentage_strings)
##################################################################################
##################################################################################
        # Center the x-axis labels: major ticks at cell borders drive the grid, while minor ticks offset by 0.5 carry the percentage labels at cell centers.
panel1.set_xlim([0,len(mutation_types)*len(percentage_strings)])
panel1.set_xticklabels([])
panel1.tick_params(axis='x', which='minor', length=0, labelsize=35)
#major ticks
panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1))
#minor ticks
panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1)+0.5,minor=True)
panel1.set_xticklabels(xticklabels_list,minor=True)
panel1.xaxis.set_label_position('top')
panel1.xaxis.set_ticks_position('top')
plt.tick_params(
axis='x', # changes apply to the x-axis
            which='major',           # only major ticks are affected
            bottom=False,            # ticks along the bottom edge are off
            top=False)               # ticks along the top edge are off
##################################################################################
##################################################################################
        # Center the y-axis labels: signature names are placed on minor ticks at row centers.
panel1.set_ylim([0,len(rows_sbs_signatures)])
panel1.set_yticklabels([])
panel1.tick_params(axis='y', which='minor', length=0, labelsize=40)
#major ticks
panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1))
#minor ticks
panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1)+0.5,minor=True)
        panel1.set_yticklabels(rows_sbs_signatures, minor=True)  # label size is set via tick_params above
plt.tick_params(
            axis='y',                # changes apply to the y-axis
            which='major',           # only major ticks are affected
            left=False)              # ticks along the left edge are off
##################################################################################
##################################################################################
# Gridlines based on major ticks
panel1.grid(which='major', color='black', zorder=3)
##################################################################################
##################################################################################
#Put the legend
if strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=40),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40)]
elif strand_bias == GENIC_VERSUS_INTERGENIC:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=40),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=40)]
elif (strand_bias==LAGGING_VERSUS_LEADING):
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]
panel1.legend(handles=legend_elements,ncol=len(legend_elements), bbox_to_anchor=(1, -0.1),loc='upper right', fontsize=40)
##################################################################################
######################################################################################################################################
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for mutation_type_index, mutation_type in enumerate(mutation_types):
for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):
if (strand_bias==LAGGING_VERSUS_LEADING):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
lagging_percentage = None
leading_percentage = None
if (LAGGING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LAGGING][percentage_string]==1):
lagging_percentage = 100
if (LEADING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LEADING][percentage_string]==1):
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='indianred', fill=True))
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='goldenrod', fill=True))
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging > radius_leading):
# First lagging
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))
# Second leading
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))
else:
# First leading
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))
# Second lagging
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
genic_percentage = None
intergenic_percentage = None
if (GENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][GENIC][percentage_string]==1):
genic_percentage = 100
if (INTERGENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][INTERGENIC][percentage_string]==1):
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='cyan',fill=True))
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='gray',fill=True))
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic > radius_intergenic):
# First genic
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))
# Second intergenic
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic,color='gray', fill=True))
else:
# First intergenic
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic, color='gray', fill=True))
# Second genic
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))
elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
transcribed_percentage = None
untranscribed_percentage = None
if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][TRANSCRIBED_STRAND][percentage_string]==1):
transcribed_percentage = 100
if (UNTRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][UNTRANSCRIBED_STRAND][percentage_string]==1):
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='royalblue',fill=True))
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='yellowgreen',fill=True))
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed > radius_untranscribed):
# First transcribed
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))
# Second untranscribed
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))
else:
# First untranscribed
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))
# Second transcribed
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))
######################################################################################################################################
##################################################################################
        # save the figure under the circle plots directory (expected to already exist)
filename = 'SBS_Signatures_%s_with_circle_plot_%s.png' % (strand_bias,str(significance_level).replace('.','_'))
figFile = os.path.join(strandbias_figures_outputDir,CIRCLE_PLOTS, filename)
        plot1.tight_layout()  # lay out before saving
        plot1.savefig(figFile, bbox_inches='tight')
plt.cla()
plt.close(plot1)
##################################################################################
############################################################################################################################
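# Illustrative call sketch (signature names and output directory are assumptions; the percentage dictionary is computed
# elsewhere in the pipeline):
#   plot_six_mutations_sbs_signatures_circle_figures(['SBS1', 'SBS4'], LAGGING_VERSUS_LEADING,
#       os.path.join(outputDir, jobname, FIGURE, STRANDBIAS), 0.05,
#       signature2mutation_type2strand2percentagedict, percentage_strings)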
| 64.015283 | 398 | 0.615946 |
import os
import numpy as np
import statsmodels.stats.multitest
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import gridspec
import pandas as pd
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING
from SigProfilerTopography.source.commons.TopographyCommons import LEADING
from SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLES
from SigProfilerTopography.source.commons.TopographyCommons import TABLES
from SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers
from SigProfilerTopography.source.commons.TopographyCommons import percentage_strings
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT
from SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES
from SigProfilerTopography.source.commons.TopographyCommons import write_excel_file
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT
SIGNATURE = 'signature'
CANCER_TYPE = 'cancer_type'
MUTATION_TYPE = 'mutation_type'
TYPE = 'type'
SIGNIFICANT_STRAND = 'significant_strand'
SIGNIFICANCE_LEVEL = 0.05
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename
transcriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND]
genicVersusIntergenicStrands=[GENIC, INTERGENIC]
replicationStrands = [LAGGING, LEADING]
def plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(sample,numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
outputDir, jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['Transcribed_real_count'].values.size>0):
transcribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count>0 and untranscribed_real_count>0):
transcriptionRatiosDict[mutationType] = np.log10(transcribed_real_count/untranscribed_real_count)
lagging_real_count = 0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values.size > 0):
lagging_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values.size > 0):
leading_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values[0]
if (lagging_real_count>0 and leading_real_count>0):
replicationRatiosDict[mutationType] = np.log10(lagging_real_count/leading_real_count)
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType], transcriptionRatiosDict[mutationType], label=mutationType)
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
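# Illustrative call sketch (variable names are assumptions; the two dataframes come from the strand bias analysis step):
#   plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(
#       None, 0, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, outputDir, jobname)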
def plot_ncomms11383_Supp_FigG_AllMutationTypes_TranscriptionLog10Ratio_ReplicationLog10Ratio(sample,numberofMutations,type2TranscriptionStrand2CountDict,type2ReplicationStrand2CountDict,outputDir,jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
if (mutationType in type2TranscriptionStrand2CountDict) and (mutationType in type2ReplicationStrand2CountDict):
if ((TRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType]) and (UNTRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType])):
transcriptionRatiosDict[mutationType]= np.log10(type2TranscriptionStrand2CountDict[mutationType][TRANSCRIBED_STRAND]/type2TranscriptionStrand2CountDict[mutationType][UNTRANSCRIBED_STRAND])
if ((LAGGING in type2ReplicationStrand2CountDict[mutationType]) and (LEADING in type2ReplicationStrand2CountDict[mutationType])):
replicationRatiosDict[mutationType] = np.log10(type2ReplicationStrand2CountDict[mutationType][LAGGING]/type2ReplicationStrand2CountDict[mutationType][LEADING])
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType],transcriptionRatiosDict[mutationType], label=mutationType)
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
def plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes(signatureType,
sample,
numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['Transcribed_real_count'].values.size>0):
transcribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count+untranscribed_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature] = np.log10(transcribed_real_count/untranscribed_real_count)
lagging_real_count=0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values.size>0):
lagging_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values.size>0):
leading_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values[0]
if (lagging_real_count+leading_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(lagging_real_count/leading_real_count)
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
def plot_ncomms11383_Supp_FigH_AllSignatures_TranscriptionLog10Ratio_ReplicationLog10Ratio(
signatureType,
sample,
numberofMutations,
signature2TranscriptionStrand2CountDict,
signature2ReplicationStrand2CountDict,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in signature2TranscriptionStrand2CountDict) and (TRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) and
(UNTRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) ):
if ((signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]+signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) >= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature]= np.log10(signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]/signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND])
if ((signature in signature2ReplicationStrand2CountDict) and (LAGGING in (signature2ReplicationStrand2CountDict[signature])) and
(LEADING in (signature2ReplicationStrand2CountDict[signature]))):
if ((signature2ReplicationStrand2CountDict[signature][LAGGING]+signature2ReplicationStrand2CountDict[signature][LEADING])>= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(signature2ReplicationStrand2CountDict[signature][LAGGING]/signature2ReplicationStrand2CountDict[signature][LEADING])
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (
signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
def plot_ncomms11383_Supp_FigE_MutationTypeBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(
type2Sample2TranscriptionStrand2CountDict,
type2Sample2ReplicationStrand2CountDict,
outputDir,
jobname,
isFigureAugmentation):
mutationType2ColorDict = {'C>A': 'blue', 'C>G':'black', 'C>T':'red', 'T>A':'gray', 'T>C':'green', 'T>G':'pink'}
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
if mutationType not in transcriptionRatiosDict:
transcriptionRatiosDict[mutationType] = {}
if mutationType not in replicationRatiosDict:
replicationRatiosDict[mutationType] = {}
if mutationType in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys()) and (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys())):
transcriptionRatiosDict[mutationType][sample]= np.log10(type2Sample2TranscriptionStrand2CountDict[mutationType][sample][TRANSCRIBED_STRAND]/type2Sample2TranscriptionStrand2CountDict[mutationType][sample][UNTRANSCRIBED_STRAND])
if mutationType in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[mutationType].keys():
if ((LAGGING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys()) and (LEADING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys())):
replicationRatiosDict[mutationType][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[mutationType][sample][LAGGING]/type2Sample2ReplicationStrand2CountDict[mutationType][sample][LEADING])
for mutationType in six_mutation_types:
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(mutationType, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
if (mutationType in type2Sample2TranscriptionStrand2CountDict):
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((sample in replicationRatiosDict[mutationType].keys()) and (sample in transcriptionRatiosDict[mutationType].keys())):
plt.scatter(replicationRatiosDict[mutationType][sample],transcriptionRatiosDict[mutationType][sample], facecolor='none', color=mutationType2ColorDict[mutationType])
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + mutationType)
newMutationType = mutationType.replace('>', '2')
figureName = newMutationType + '_MutationType_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
def plot_ncomms11383_Supp_FigF_SignatureBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(type2Sample2TranscriptionStrand2CountDict,type2Sample2ReplicationStrand2CountDict,signatures,outputDir,jobname,isFigureAugmentation):
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signatures:
if signature not in transcriptionRatiosDict:
transcriptionRatiosDict[signature] = {}
if signature not in replicationRatiosDict:
replicationRatiosDict[signature] = {}
if signature in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]) and (TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]):
transcriptionRatiosDict[signature][sample] = np.log10(type2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND] /type2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])
if signature in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[signature].keys():
if (LAGGING in type2Sample2ReplicationStrand2CountDict[signature][sample]) and (LEADING in type2Sample2ReplicationStrand2CountDict[signature][sample]):
replicationRatiosDict[signature][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[signature][sample][LAGGING] /type2Sample2ReplicationStrand2CountDict[signature][sample][LEADING])
for signature in signatures:
if (len(replicationRatiosDict[signature].keys())>0 and len(transcriptionRatiosDict[signature].keys())>0):
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(signature, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (sample in replicationRatiosDict[signature]) and (sample in transcriptionRatiosDict[signature]):
plt.scatter(replicationRatiosDict[signature][sample], transcriptionRatiosDict[signature][sample],facecolor='none',color='green')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + signature)
figureName = signature.replace(' ','') + '_Signature_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
def is_there_at_least_10perc_diff(strand1_value, strand2_value):
diff = abs(strand1_value - strand2_value)
if (diff >= (strand1_value/10)) or (diff >= (strand2_value/10)):
return True
else:
return False
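# Worked example (illustrative): is_there_at_least_10perc_diff(1000, 1101) returns True because
# abs(1000 - 1101) = 101 >= 1000/10 = 100, whereas is_there_at_least_10perc_diff(1000, 1050) returns False.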
def plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
key,
isKeySample,
numberofMutations,
N,
x_axis_labels,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
mutationsOrSignatures,
color1,
color2,
figureName,
width,
plot_mode):
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
ind = np.arange(N)
fig, ax = plt.subplots(figsize=(16,10),dpi=300)
legend=None
rects1=None
rects2=None
rects3=None
rects4=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind +3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
if plot_mode==PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL:
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
locs, labels = plt.yticks()
ax.set_ylim(0, locs[-1] + 5000)
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
if key is not None:
ax.set_title('%s %s vs. %s %s' %(key,strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
else:
ax.set_title('%s vs. %s %s' %(strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
if len(x_axis_labels) > 6:
ax.set_xticklabels(x_axis_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_labels, fontsize=35)
plt.ylabel('Number of single base substitutions', fontsize=35, fontweight='normal')
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
elif plot_mode == PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT:
ax.tick_params(axis='x', which='both', length=0)
ax.tick_params(axis='y', which='both', length=0)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
if (numberofSimulations > 0):
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 30}, ncol=1, loc='best')
else:
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 35},loc='upper right')
ax.set_facecolor('white')
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
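    # Note (sketch, not the original call site): fdr_bh_adjusted_pvalues are expected to be Benjamini-Hochberg corrected
    # q-values computed upstream, e.g. statsmodels.stats.multitest.multipletests(p_values, method='fdr_bh')[1].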
if fdr_bh_adjusted_pvalues is not None:
for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
y_value = max(rect1.get_height(),rect2.get_height())
x_value = rect1.get_x() + rect1.get_width()
space = 3
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = "{:.1f}".format(y_value)
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
'***', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=20)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
'**', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=20)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):
plt.annotate(
'*', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=20)
if (key is None):
figureName = '%s_bar_plot.png' %(figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
elif (not isKeySample):
figureName = '%s_%s_bar_plot.png' %(key,figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
else:
figureName = '%s_%s_%d_bar_plot.png' %(figureName,key,numberofMutations)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
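# Illustrative call sketch for the bar plots above (all argument values are assumptions, not taken from the pipeline):
#   plotStrandBiasFigureWithBarPlots(outputDir, jobname, numberofSimulations=100, key=None, isKeySample=False,
#                                    numberofMutations=0, N=6, x_axis_labels=six_mutation_types,
#                                    strand1_values=lagging_real_counts, strand2_values=leading_real_counts,
#                                    strand1_simulations_median_values=lagging_sims_medians,
#                                    strand2_simulations_median_values=leading_sims_medians,
#                                    fdr_bh_adjusted_pvalues=q_values, strand1Name=LAGGING, strand2Name=LEADING,
#                                    mutationsOrSignatures='All Mutations', color1='indianred', color2='goldenrod',
#                                    figureName='lagging_versus_leading', width=0.35,
#                                    plot_mode=PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL)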
def plot_circle_plot_in_given_axis(ax,
percentage_strings,
sbs_signature,
six_mutation_types,
xticklabels_list,
signature2mutation_type2strand2percentagedict):
strand_bias_list=[LAGGING_VERSUS_LEADING, TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC]
ax.set_aspect(1.0)
title = '%s Strand Bias' %(sbs_signature)
ax.text(len(percentage_strings) * 3, len(strand_bias_list) + 2.5, title, horizontalalignment='center',fontsize=60, fontweight='bold', fontname='Arial')
colors = [[3 / 256, 189 / 256, 239 / 256],
[1 / 256, 1 / 256, 1 / 256],
[228 / 256, 41 / 256, 38 / 256],
[203 / 256, 202 / 256, 202 / 256],
[162 / 256, 207 / 256, 99 / 256],
[236 / 256, 199 / 256, 197 / 256]]
x = 0
for i in range(0, len(six_mutation_types), 1):
ax.text((x + (len(percentage_strings) / 2) - 0.75), len(strand_bias_list) + 1.5, six_mutation_types[i],fontsize=55, fontweight='bold', fontname='Arial')
ax.add_patch(plt.Rectangle((x + .0415, len(strand_bias_list) + 0.75), len(percentage_strings) - (2 * .0415), .5,facecolor=colors[i], clip_on=False))
ax.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(strand_bias_list), facecolor=colors[i], zorder=0,alpha=0.25, edgecolor='grey'))
x += len(percentage_strings)
ax.set_xlim([0, len(six_mutation_types) * len(percentage_strings)])
ax.set_xticklabels([])
ax.tick_params(axis='x', which='minor', length=0, labelsize=35)
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1))
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1) + 0.5, minor=True)
ax.set_xticklabels(xticklabels_list, minor=True)
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
ax.tick_params(
axis='x', which='major', bottom=False, top=False)
ax.set_ylim([0, len(strand_bias_list)])
ax.set_yticklabels([])
ax.tick_params(axis='y', which='minor', length=0, labelsize=40)
ax.set_yticks(np.arange(0, len(strand_bias_list), 1))
ax.set_yticks(np.arange(0, len(strand_bias_list), 1) + 0.5, minor=True)
ax.set_yticklabels(['', sbs_signature,''], minor=True)
ax.tick_params(
axis='y', which='major', left=False)
ax.grid(which='major', color='black', zorder=3)
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan', markersize=40),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray', markersize=40),
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue',markersize=40),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40),
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]
legend = ax.legend(handles=legend_elements, ncol=len(legend_elements), bbox_to_anchor=(0.5, 0), loc='upper center',fontsize=40)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
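# For each (percentage threshold, mutation type, comparison) cell, draw a filled
# circle in the colour of the strand whose percentage flag is set; when both
# strands are flagged, both circles are drawn (equal radii here, so the circle
# drawn last covers the other).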
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for mutation_type_index, mutation_type in enumerate(six_mutation_types):
for strand_bias_index, strand_bias in enumerate(strand_bias_list):
if (strand_bias == LAGGING_VERSUS_LEADING):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
lagging_percentage = None
leading_percentage = None
if (LAGGING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LAGGING][percentage_string] == 1):
lagging_percentage = 100
if (LEADING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LEADING][percentage_string] == 1):
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,strand_bias_index + 0.5), radius, color='indianred', fill=True))
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='goldenrod', fill=True))
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging > radius_leading):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
else:
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
genic_percentage = None
intergenic_percentage = None
if (GENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
GENIC][percentage_string] == 1):
genic_percentage = 100
if (INTERGENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
INTERGENIC][percentage_string] == 1):
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='cyan',
fill=True))
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='gray',
fill=True))
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic > radius_intergenic):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
else:
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
transcribed_percentage = None
untranscribed_percentage = None
if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
TRANSCRIBED_STRAND][percentage_string] == 1):
transcribed_percentage = 100
if (UNTRANSCRIBED_STRAND in
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
UNTRANSCRIBED_STRAND][percentage_string] == 1):
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='royalblue', fill=True))
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='yellowgreen', fill=True))
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed > radius_untranscribed):
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
else:
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
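# Grouped bar plot of real vs. simulated counts on two strands. Real bars for the
# two strands sit side by side; hatched bars show the simulation medians. Mutation
# types whose BH-adjusted q-value passes 1e-4 / 1e-3 / SIGNIFICANCE_LEVEL and the
# is_there_at_least_10perc_diff check are starred with ***, ** or *.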
def plot_strand_bias_figure_with_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
ind = np.arange(N)
if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend = None
rects3 = None
rects4 = None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind + 3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ymax = np.nanmax([np.nanmax(strand1_values),
np.nanmax(strand2_values),
np.nanmax(strand1_simulations_median_values),
np.nanmax(strand2_simulations_median_values)])
y = ymax / 1.025
ytick_offset = float(y / 3)
ylabs = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
ylabels = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
ylabels = ['{:,}'.format(int(x)) for x in ylabels]
if len(ylabels[-1]) > 3:
ylabels_temp = []
if len(ylabels[-1]) > 7:
for label in ylabels:
if len(label) > 7:
ylabels_temp.append(label[0:-8] + "m")
elif len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
else:
for label in ylabels:
if len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
ylabels = ylabels_temp
ax.set_ylim([0, y])
ax.set_yticks(ylabs)
ax.set_yticklabels(ylabels, fontsize=35, fontweight='bold', fontname='Arial')
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
ax.set_title('%s vs. %s' %(strand1Name,strand2Name), fontsize=40, fontweight='bold')
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),
(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
ax.set_facecolor('white')
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
if fdr_bh_adjusted_pvalues is not None:
for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
y_value = max(rect1.get_height(),rect2.get_height())
x_value = rect1.get_x() + rect1.get_width()
space = 3
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = "{:.1f}".format(y_value)
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
'***', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
'**', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)) :
ax.annotate(
'*', (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
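# Pulls the real and simulated counts and q-values of one SBS signature out of
# signature_strand1_versus_strand2_df for the requested comparison and draws the
# grouped bar plot into the given axis via plot_strand_bias_figure_with_bar_plots.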
def plot_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
box = axis.get_position()
axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plot_strand_bias_figure_with_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given = axis)
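# Stacked bar plot of the strand1/strand2 proportions, real bars next to hatched
# simulation bars, with the real-vs-simulated odds ratio printed above each pair;
# significance stars follow the same q-value thresholds and
# is_there_at_least_10perc_diff check as the grouped bar plot above.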
def plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
strand1_values = [0 if np.isnan(x) else x for x in strand1_values]
strand2_values = [0 if np.isnan(x) else x for x in strand2_values]
strand1_simulations_median_values = [0 if np.isnan(x) else x for x in strand1_simulations_median_values]
strand2_simulations_median_values = [0 if np.isnan(x) else x for x in strand2_simulations_median_values]
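# Odds ratio per mutation type: (strand1/strand2 in the real data) divided by
# (strand1/strand2 in the simulations); NaN whenever a denominator is zero.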
odds_real_list = []
odds_sims_list = []
for a, b in zip(strand1_values, strand2_values):
odds_real = np.nan
if b>0:
odds_real = a/b
odds_real_list.append(odds_real)
for x, y in zip(strand1_simulations_median_values, strand2_simulations_median_values):
odds_sims = np.nan
if y > 0:
odds_sims = x/y
odds_sims_list.append(odds_sims)
odds_ratio_list = [odds_real/odds_sims if odds_sims>0 else np.nan for (odds_real, odds_sims) in zip(odds_real_list,odds_sims_list)]
ind = np.arange(N)
if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind, strand2_values, width=width, edgecolor='black', color=color2, bottom=strand1_values)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
ax.bar(ind + width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
ax.bar(ind + width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///', bottom=strand1_simulations_median_values)
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ax.set_ylim(0, 1.1)
ax.set_yticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=35)
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
stacked_bar_title = 'Real vs. Simulated\nOdds Ratio of %s vs. %s' %(strand1Name, strand2Name)
ax.set_title(stacked_bar_title, fontsize=40, fontweight='bold')
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
if (numberofSimulations > 0):
ax.set_xticks(ind + (width/2))
else:
ax.set_xticks(ind + width / 2)
ax.set_facecolor('white')
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
if odds_ratio_list is not None:
for odds_ratio, fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(odds_ratio_list, fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
y_value = rect1.get_height() + rect2.get_height()
x_value = rect1.get_x() + rect1.get_width()
space = 3
va = 'bottom'
if y_value < 0:
space *= -1
va = 'top'
label = "{:.1f}".format(y_value)
if not np.isnan(odds_ratio):
if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
'%.2f ***' %(odds_ratio), (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
'%.2f **' %(odds_ratio), (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
ax.annotate(
'%.2f *' %(odds_ratio), (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
else:
ax.annotate(
'%.2f' %(odds_ratio), (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va=va,
fontsize=25)
if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
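# Converts the real and simulated per-mutation-type counts of one signature into
# per-strand fractions (a mutation type is kept only if at least one strand has
# NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT mutations) and delegates
# the drawing to plot_strand_bias_figure_with_stacked_bar_plots.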
def plot_stacked_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
box = axis.get_position()
axis.set_position([box.x0, box.y0+0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
if (strand1_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_real_list.append(strand1_real_count/(strand1_real_count+strand2_real_count))
mutationtype_strand2_real_list.append(strand2_real_count/(strand1_real_count+strand2_real_count))
else:
mutationtype_strand1_real_list.append(np.nan)
mutationtype_strand2_real_list.append(np.nan)
if (strand1_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_sims_mean_list.append(strand1_sims_count/(strand1_sims_count+strand2_sims_count))
mutationtype_strand2_sims_mean_list.append(strand2_sims_count/(strand1_sims_count+strand2_sims_count))
else:
mutationtype_strand1_sims_mean_list.append(np.nan)
mutationtype_strand2_sims_mean_list.append(np.nan)
plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given=axis)
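# Builds the per-signature composite figure: circle plot across the top rows,
# grouped bar plots (genic vs. intergenic, transcribed vs. untranscribed,
# lagging vs. leading) in the middle rows, the corresponding stacked bar plots in
# the bottom rows, then saves it under FIGURE/STRANDBIAS/CIRCLE_BAR_PLOTS.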
def plot_circle_bar_plots_together(outputDir,
jobname,
sbs_signature,
six_mutation_types,
signature2mutation_type2strand2percentagedict,
signature_genic_versus_intergenic_df,
signature_transcribed_versus_untranscribed_df,
signature_lagging_versus_leading_df,
genic_vs_intergenic_strands,
transcription_strands,
replication_strands):
x_ticklabels_list = percentage_strings * 6
fig = plt.figure(figsize=(5 + 1.5 * len(x_ticklabels_list), 30 + 1.5))
plt.rc('axes', edgecolor='lightgray')
width = 6
height = 6
width_ratios = [1] * width
height_ratios = [1] * height
gs = gridspec.GridSpec(height, width, height_ratios = height_ratios, width_ratios = width_ratios)
fig.subplots_adjust(hspace=0, wspace=3)
circle_plot_axis = plt.subplot(gs[0:2, :])
genic_vs_intergenic_bar_plot_axis = plt.subplot(gs[2:4, 0:2])
transcribed_vs_untranscribed_bar_plot_axis = plt.subplot(gs[2:4, 2:4])
lagging_vs_leading_bar_plot_axis = plt.subplot(gs[2:4, 4:6])
genic_vs_intergenic_stacked_bar_plot_axis = plt.subplot(gs[4:, 0:2])
transcribed_vs_untranscribed_stacked_bar_plot_axis = plt.subplot(gs[4:, 2:4])
lagging_vs_leading_stacked_bar_plot_axis = plt.subplot(gs[4:, 4:6])
plot_circle_plot_in_given_axis(circle_plot_axis,
percentage_strings,
sbs_signature,
six_mutation_types,
x_ticklabels_list,
signature2mutation_type2strand2percentagedict)
plot_bar_plot_in_given_axis(genic_vs_intergenic_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Number of Single Base Substitutions')
plot_bar_plot_in_given_axis(transcribed_vs_untranscribed_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_bar_plot_in_given_axis(lagging_vs_leading_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
plot_stacked_bar_plot_in_given_axis(genic_vs_intergenic_stacked_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Ratio of mutations on each strand')
plot_stacked_bar_plot_in_given_axis(transcribed_vs_untranscribed_stacked_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_stacked_bar_plot_in_given_axis(lagging_vs_leading_stacked_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
filename = '%s_circle_bar_plots.png' % (sbs_signature)
figurepath = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, CIRCLE_BAR_PLOTS, filename)
fig.savefig(figurepath, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
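# For every signature in signature_strand1_versus_strand2_df, gathers the real
# and simulated counts plus BH-adjusted q-values per mutation type and calls
# plotStrandBiasFigureWithBarPlots to write one bar-plot figure per signature.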
def plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
signature_cutoff_numberofmutations_averageprobability_df,
isKeySample,
existingMutationTypesList,
signature_strand1_versus_strand2_df,
width,
strand1_versus_strand2,
strands,
color1,
color2,
title,
figureName,
plot_mode):
signatures = signature_strand1_versus_strand2_df['signature'].unique()
x_axis_labels = existingMutationTypesList
N = len(x_axis_labels)
for signature in signatures:
numberofMutations = int(signature_cutoff_numberofmutations_averageprobability_df[signature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['number_of_mutations'].values[0])
mutationtype_strand1_real_list=[]
mutationtype_strand2_real_list=[]
mutationtype_strand1_sims_mean_list=[]
mutationtype_strand2_sims_mean_list=[]
mutationtype_FDR_BH_adjusted_pvalues_list=[]
for mutation_type in existingMutationTypesList:
if (strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED):
strand1_real_count_column_name=TRANSCRIBED_REAL_COUNT
strand1_sims_mean_count_Column_name=TRANSCRIBED_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=UNTRANSCRIBED_REAL_COUNT
strand2_sims_mean_count_Column_name=UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
q_value_column_name = TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
elif (strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC):
strand1_real_count_column_name=GENIC_REAL_COUNT
strand1_sims_mean_count_Column_name=GENIC_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=INTERGENIC_REAL_COUNT
strand2_sims_mean_count_Column_name=INTERGENIC_SIMULATIONS_MEAN_COUNT
q_value_column_name = GENIC_VERSUS_INTERGENIC_Q_VALUE
elif (strand1_versus_strand2 == LAGGING_VERSUS_LEADING):
strand1_real_count_column_name=LAGGING_REAL_COUNT
strand1_sims_mean_count_Column_name=LAGGING_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=LEADING_REAL_COUNT
strand2_sims_mean_count_Column_name=LEADING_SIMULATIONS_MEAN_COUNT
q_value_column_name = LAGGING_VERSUS_LEADING_Q_VALUE
strand1_real_count = 0
strand1_sims_mean_count = 0
strand2_real_count = 0
strand2_sims_mean_count = 0
q_value = None
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values.size>0):
strand1_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values.size>0):
strand1_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values.size>0):
strand2_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values.size>0):
strand2_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values.size>0):
q_value = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_mean_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_mean_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
signature,
isKeySample,
numberofMutations,
N,
x_axis_labels,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
title,
color1,
color2,
figureName,
width,
plot_mode)
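# Top-level driver for the strand-bias figures. Loads the signature tables
# (discreet or probability mode), reads the per-strand count tables produced by
# the analysis step, pools all p-values for a single Benjamini-Hochberg FDR
# correction, writes q-value and filtered-percentage tables, and generates the
# strand-bias figures.
# Example call (a sketch only; the argument values below are illustrative):
#   transcriptionReplicationStrandBiasFiguresUsingDataframes(
#       outputDir, jobname, 100, mutation_types_contexts,
#       [TRANSCRIBED_VERSUS_UNTRANSCRIBED, LAGGING_VERSUS_LEADING],
#       True, plot_mode)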
def transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode):
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
sbs_df = pd.DataFrame()
dbs_df = pd.DataFrame()
id_df = pd.DataFrame()
subsSignatures = np.array([])
dinucsSignatures = np.array([])
indelsSignatures = np.array([])
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,BAR_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_BAR_PLOTS), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,TABLES), exist_ok=True)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,EXCEL_FILES), exist_ok=True)
strandbias_figures_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS)
strandbias_figures_tables_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, TABLES)
strandbias_figures_excel_files_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, EXCEL_FILES)
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
subsSignatures = subsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
if (DBS in mutation_types_contexts):
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
dinucsSignatures = dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
if (ID in mutation_types_contexts):
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
indelsSignatures = indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()
if is_discreet:
sbs_df = subsSignature_cutoff_numberofmutations_averageprobability_df
dbs_df = dinucsSignature_cutoff_numberofmutations_averageprobability_df
id_df = indelsSignature_cutoff_numberofmutations_averageprobability_df
else:
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
sbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
subsSignatures = sbs_df['signature'].unique()
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
dbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
dinucsSignatures = dbs_df['signature'].unique()
if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):
id_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})
indelsSignatures = id_df['signature'].unique()
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_mutation_type_lagging_versus_leading_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_mutation_type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,signature_mutation_type_lagging_versus_leading_table_file_name)
signature_lagging_versus_leading_df = pd.read_csv(signature_mutation_type_lagging_versus_leading_table_filepath, header=0, sep='\t')
type_lagging_versus_leading_table_file_name = 'Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)
type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,type_lagging_versus_leading_table_file_name)
type_lagging_versus_leading_df = pd.read_csv(type_lagging_versus_leading_table_filepath, header=0, sep='\t')
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_mutation_type_transcribed_versus_untranscribed_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_mutation_type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_transcribed_versus_untranscribed_table_file_name)
signature_transcribed_versus_untranscribed_df = pd.read_csv(signature_mutation_type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t')
type_transcribed_versus_untranscribed_table_file_name = 'Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_transcribed_versus_untranscribed_table_file_name)
type_transcribed_versus_untranscribed_df = pd.read_csv(type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t')
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_mutation_type_genic_versus_intergenic_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_mutation_type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_genic_versus_intergenic_table_file_name)
signature_genic_versus_intergenic_df = pd.read_csv(signature_mutation_type_genic_versus_intergenic_table_filepath, header=0, sep='\t')
type_genic_versus_intergenic_table_file_name = 'Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_genic_versus_intergenic_table_file_name)
type_genic_versus_intergenic_df = pd.read_csv(type_genic_versus_intergenic_table_filepath, header=0, sep='\t')
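# Pool one p-value per (cancer type, signature, mutation type, comparison) and
# per (cancer type, type, comparison) so that a single FDR correction can be
# applied across all strand-bias tests below.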
p_values_list=[]
element_names=[]
if LAGGING_VERSUS_LEADING in strand_bias_list:
for index, row in signature_lagging_versus_leading_df.iterrows():
element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], LAGGING_VERSUS_LEADING)
element_names.append(element_name)
p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])
for index, row in type_lagging_versus_leading_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], LAGGING_VERSUS_LEADING)
element_names.append(element_name)
p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
for index, row in signature_transcribed_versus_untranscribed_df.iterrows():
element_name=(row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)
element_names.append(element_name)
p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])
for index, row in type_transcribed_versus_untranscribed_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)
element_names.append(element_name)
p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
for index, row in signature_genic_versus_intergenic_df.iterrows():
element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], GENIC_VERSUS_INTERGENIC)
element_names.append(element_name)
p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])
for index, row in type_genic_versus_intergenic_df.iterrows():
element_name=(row[CANCER_TYPE], None, row[TYPE], GENIC_VERSUS_INTERGENIC)
element_names.append(element_name)
p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])
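# Benjamini-Hochberg FDR correction over all pooled p-values; the adjusted
# values are written back into the matching q-value columns of the signature-
# and type-level dataframes.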
if ((p_values_list is not None) and p_values_list):
rejected, all_FDR_BH_adjusted_p_values, alphacSidak, alphacBonf = statsmodels.stats.multitest.multipletests(p_values_list, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan
type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = np.nan
type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]= np.nan
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan
type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan
for element_index, element_name in enumerate(element_names,0):
(cancer_type, signature, mutation_type, versus_type)=element_name
q_value=all_FDR_BH_adjusted_p_values[element_index]
if (signature is not None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
signature_transcribed_versus_untranscribed_df.loc[(signature_transcribed_versus_untranscribed_df[CANCER_TYPE]==cancer_type) &
(signature_transcribed_versus_untranscribed_df[SIGNATURE]==signature) &
(signature_transcribed_versus_untranscribed_df[MUTATION_TYPE]==mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]=q_value
elif (signature is not None) and (versus_type == GENIC_VERSUS_INTERGENIC):
signature_genic_versus_intergenic_df.loc[(signature_genic_versus_intergenic_df[CANCER_TYPE]==cancer_type) &
(signature_genic_versus_intergenic_df[SIGNATURE]==signature) &
(signature_genic_versus_intergenic_df[MUTATION_TYPE]==mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE]=q_value
elif (signature is not None) and (versus_type==LAGGING_VERSUS_LEADING):
signature_lagging_versus_leading_df.loc[(signature_lagging_versus_leading_df[CANCER_TYPE]==cancer_type) &
(signature_lagging_versus_leading_df[SIGNATURE]==signature) &
(signature_lagging_versus_leading_df[MUTATION_TYPE]==mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE]=q_value
elif (signature is None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
type_transcribed_versus_untranscribed_df.loc[(type_transcribed_versus_untranscribed_df[CANCER_TYPE] == cancer_type) & (type_transcribed_versus_untranscribed_df[TYPE] == mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = q_value
elif (signature is None) and (versus_type == GENIC_VERSUS_INTERGENIC):
type_genic_versus_intergenic_df.loc[(type_genic_versus_intergenic_df[CANCER_TYPE] == cancer_type) & (type_genic_versus_intergenic_df[TYPE] == mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE] = q_value
elif (signature is None) and (versus_type == LAGGING_VERSUS_LEADING):
type_lagging_versus_leading_df.loc[(type_lagging_versus_leading_df[CANCER_TYPE] == cancer_type) & (type_lagging_versus_leading_df[TYPE] == mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE] = q_value
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_df = signature_lagging_versus_leading_df[
['cancer_type', 'signature', 'mutation_type',
'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count',
'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count',
'Lagging_max_sims_count', 'Lagging_sims_count_list',
'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count',
'Leading_max_sims_count', 'Leading_sims_count_list']]
type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_lagging_versus_leading_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_lagging_versus_leading_df.to_csv(type_filepath, sep='\t', header=True, index=False)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_df=signature_transcribed_versus_untranscribed_df[['cancer_type', 'signature', 'mutation_type',
'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',
'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',
'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]
type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',
'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',
'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_transcribed_versus_untranscribed_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_transcribed_versus_untranscribed_df.to_csv(type_filepath, sep='\t', header=True, index=False)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_df=signature_genic_versus_intergenic_df[['cancer_type', 'signature', 'mutation_type',
'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',
'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]
type_genic_versus_intergenic_df=type_genic_versus_intergenic_df[['cancer_type', 'type',
'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',
'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',
'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]
signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_genic_versus_intergenic_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_genic_versus_intergenic_df.to_csv(type_filepath, sep='\t', header=True, index=False)
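# Keep only rows with q-value <= SIGNIFICANCE_LEVEL, record which strand
# dominates, and set a flag for every percentage threshold at which the dominant
# strand exceeds the other by at least that percentage of the other strand's count.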
if LAGGING_VERSUS_LEADING in strand_bias_list:
signature_lagging_versus_leading_filtered_q_value_df = signature_lagging_versus_leading_df[signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_lagging_versus_leading_filtered_q_value_df= type_lagging_versus_leading_df[type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
signature_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None
for percentage_string in percentage_strings:
signature_lagging_versus_leading_filtered_q_value_df[percentage_string] = None
type_lagging_versus_leading_filtered_q_value_df[percentage_string] = None
signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND] = LAGGING
signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]), SIGNIFICANT_STRAND] = LEADING
type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND]=LAGGING
type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]),SIGNIFICANT_STRAND]=LEADING
for percentage_index, percentage_number in enumerate(percentage_numbers, 0):
percentage_string = percentage_strings[percentage_index]
signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_lagging_versus_leading_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_lagging_versus_leading_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
signature_transcribed_versus_untranscribed_filtered_q_value_df = signature_transcribed_versus_untranscribed_df[signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_transcribed_versus_untranscribed_filtered_q_value_df= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy()
signature_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND]=None
for percentage_string in percentage_strings:
signature_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string]=None
type_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string] = None
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND
for percentage_index, percentage_number in enumerate(percentage_numbers,0):
percentage_string=percentage_strings[percentage_index]
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True, index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True,index=False)
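# Genic versus intergenic: same q-value filtering, significant strand assignment and percentage-difference flags as above.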
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
signature_genic_versus_intergenic_filtered_q_value_df = signature_genic_versus_intergenic_df[signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
type_genic_versus_intergenic_filtered_q_value_df = type_genic_versus_intergenic_df[type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()
signature_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None
type_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None
for percentage_string in percentage_strings:
signature_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None
type_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None
signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC
signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]),SIGNIFICANT_STRAND] = INTERGENIC
type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC
type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = INTERGENIC
for percentage_index, percentage_number in enumerate(percentage_numbers):
percentage_string = percentage_strings[percentage_index]
signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1
signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)
signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)
signature_genic_versus_intergenic_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False)
type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)
type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)
type_genic_versus_intergenic_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False)
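# Write one Excel workbook per strand asymmetry comparison (signatures and types separately), with the full corrected p-value table and the filtered percentages table as separate sheets.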
sheet_list = ['corrected_p_value', 'percentages']
for strand1_versus_strand2 in strand_bias_list:
if strand1_versus_strand2 == LAGGING_VERSUS_LEADING:
signatures_df_list = [signature_lagging_versus_leading_df, signature_lagging_versus_leading_filtered_q_value_df]
types_df_list = [type_lagging_versus_leading_df, type_lagging_versus_leading_filtered_q_value_df]
elif strand1_versus_strand2 == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
signatures_df_list = [signature_transcribed_versus_untranscribed_df, signature_transcribed_versus_untranscribed_filtered_q_value_df]
types_df_list = [type_transcribed_versus_untranscribed_df, type_transcribed_versus_untranscribed_filtered_q_value_df]
elif strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC:
signatures_df_list = [signature_genic_versus_intergenic_df, signature_genic_versus_intergenic_filtered_q_value_df]
types_df_list = [type_genic_versus_intergenic_df, type_genic_versus_intergenic_filtered_q_value_df]
signatures_filename = "Signatures_Mutation_Types_%s.xlsx" % (strand1_versus_strand2)
file_name_with_path = os.path.join(strandbias_figures_excel_files_outputDir, signatures_filename)
write_excel_file(signatures_df_list, sheet_list, file_name_with_path)
types_filename = "Types_%s.xlsx" % (strand1_versus_strand2)
file_name_with_path = os.path.join(strandbias_figures_excel_files_outputDir, types_filename)
write_excel_file(types_df_list, sheet_list, file_name_with_path)
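# Build a nested dictionary: signature -> mutation type -> significant strand -> which at-least-X-percent difference thresholds are met, pooled over all requested strand bias comparisons.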
signature2mutation_type2strand2percentagedict = {}
df_list = []
if LAGGING_VERSUS_LEADING in strand_bias_list:
df_list.append(signature_lagging_versus_leading_filtered_q_value_df)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
df_list.append(signature_transcribed_versus_untranscribed_filtered_q_value_df)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
df_list.append(signature_genic_versus_intergenic_filtered_q_value_df)
for df in df_list:
for index, row in df.iterrows():
cancer_type = row[CANCER_TYPE]
signature = row[SIGNATURE]
mutation_type = row[MUTATION_TYPE]
significant_strand=row[SIGNIFICANT_STRAND]
percent_10 = row[AT_LEAST_10_PERCENT_DIFF]
percent_20 = row[AT_LEAST_20_PERCENT_DIFF]
percent_30 = row[AT_LEAST_30_PERCENT_DIFF]
percent_50 = row[AT_LEAST_50_PERCENT_DIFF]
percent_75 = row[AT_LEAST_75_PERCENT_DIFF]
percent_100 = row[AT_LEAST_100_PERCENT_DIFF]
if signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[signature]:
if significant_strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand]={}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
else:
signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
else:
signature2mutation_type2strand2percentagedict[signature] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1
if (percent_20 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1
if (percent_30 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1
if (percent_50 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1
if (percent_75 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1
if (percent_100 == 1):
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1
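# Same bookkeeping keyed by type (mutation types and DBS/ID signatures) instead of per SBS signature and mutation type.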
type2strand2percentagedict = {}
df_list = []
if LAGGING_VERSUS_LEADING in strand_bias_list:
df_list.append(type_lagging_versus_leading_filtered_q_value_df)
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
df_list.append(type_transcribed_versus_untranscribed_filtered_q_value_df)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
df_list.append(type_genic_versus_intergenic_filtered_q_value_df)
for df in df_list:
for index, row in df.iterrows():
cancer_type = row[CANCER_TYPE]
my_type = row[TYPE]
significant_strand=row[SIGNIFICANT_STRAND]
percent_10 = row[AT_LEAST_10_PERCENT_DIFF]
percent_20 = row[AT_LEAST_20_PERCENT_DIFF]
percent_30 = row[AT_LEAST_30_PERCENT_DIFF]
percent_50 = row[AT_LEAST_50_PERCENT_DIFF]
percent_75 = row[AT_LEAST_75_PERCENT_DIFF]
percent_100 = row[AT_LEAST_100_PERCENT_DIFF]
if my_type in type2strand2percentagedict:
if significant_strand in type2strand2percentagedict[my_type]:
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
type2strand2percentagedict[my_type][significant_strand]={}
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
else:
type2strand2percentagedict[my_type] = {}
type2strand2percentagedict[my_type][significant_strand] = {}
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0
if (percent_10 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1
if (percent_20 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1
if (percent_30 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1
if (percent_50 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1
if (percent_75 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1
if (percent_100 == 1):
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1
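# Draw the legends and the circle figures that summarize the significant strand asymmetries.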
plot_legend(strandbias_figures_outputDir)
for strand_bias in strand_bias_list:
if np.any(subsSignatures):
plot_six_mutations_sbs_signatures_circle_figures(subsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
signature2mutation_type2strand2percentagedict,
percentage_strings)
if np.any(dinucsSignatures):
plot_dbs_and_id_signatures_circle_figures(DBS,
dinucsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings)
if np.any(indelsSignatures):
plot_dbs_and_id_signatures_circle_figures(ID,
indelsSignatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings)
if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty)):
plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(None,None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not sbs_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('subs', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
sbs_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not dbs_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('dinucs', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
dbs_df,
outputDir, jobname)
if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not id_df.empty)):
plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('indels', None, None,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
id_df,
outputDir, jobname)
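# Bar plots of real versus simulated strand counts for all mutation types and all signatures, one figure per strand bias comparison.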
isKeySample = False
width = 0.20
types_list= [('All Mutations', 'mutationtypes', six_mutation_types),
('All Signatures', 'subs_signatures', subsSignatures),
('All Signatures', 'indels_signatures', indelsSignatures),
('All Signatures', 'dinucs_signatures', dinucsSignatures)]
for mutationsOrSignatures, sub_figure_name, x_axis_labels in types_list:
x_axis_labels = sorted(x_axis_labels, key=natural_key)
N = len(x_axis_labels)
for strand_bias in strand_bias_list:
if (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
type_strand1_versus_strand2_df = type_transcribed_versus_untranscribed_df
strand1 = transcriptionStrands[0]
strand2 = transcriptionStrands[1]
strand1_real_count_column_name = 'Transcribed_real_count'
strand2_real_count_column_name = 'UnTranscribed_real_count'
strand1_sims_mean_count_column_name = 'Transcribed_mean_sims_count'
strand2_sims_mean_count_column_name = 'UnTranscribed_mean_sims_count'
q_value_column_name = 'transcribed_versus_untranscribed_q_value'
color1 = 'royalblue'
color2 = 'yellowgreen'
figureName = '%s_transcription_strand_bias' %(sub_figure_name)
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
type_strand1_versus_strand2_df = type_genic_versus_intergenic_df
strand1 = genicVersusIntergenicStrands[0]
strand2 = genicVersusIntergenicStrands[1]
strand1_real_count_column_name = 'genic_real_count'
strand2_real_count_column_name = 'intergenic_real_count'
strand1_sims_mean_count_column_name = 'genic_mean_sims_count'
strand2_sims_mean_count_column_name = 'intergenic_mean_sims_count'
q_value_column_name = 'genic_versus_intergenic_q_value'
color1 = 'cyan'
color2 = 'gray'
figureName = '%s_genic_versus_intergenic_strand_bias' %(sub_figure_name)
elif (strand_bias == LAGGING_VERSUS_LEADING):
type_strand1_versus_strand2_df = type_lagging_versus_leading_df
strand1 = replicationStrands[0]
strand2 = replicationStrands[1]
strand1_real_count_column_name = 'Lagging_real_count'
strand2_real_count_column_name = 'Leading_real_count'
strand1_sims_mean_count_column_name = 'Lagging_mean_sims_count'
strand2_sims_mean_count_column_name = 'Leading_mean_sims_count'
q_value_column_name = 'lagging_versus_leading_q_value'
color1 = 'indianred'
color2 = 'goldenrod'
figureName = '%s_replication_strand_bias' %(sub_figure_name)
types_strand1_real_count_list = []
types_strand2_real_count_list = []
types_strand1_sims_mean_count_list = []
types_strand2_sims_mean_count_list = []
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues = []
for my_type in x_axis_labels:
strand1_real_count = 0
strand2_real_count = 0
strand1_sims_mean_count = 0
strand2_sims_mean_count = 0
q_value = None
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values.size>0:
strand1_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values.size>0:
strand2_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values.size>0:
strand1_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values.size>0:
strand2_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values[0]
if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values.size>0:
q_value= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values[0]
types_strand1_real_count_list.append(strand1_real_count)
types_strand2_real_count_list.append(strand2_real_count)
types_strand1_sims_mean_count_list.append(strand1_sims_mean_count)
types_strand2_sims_mean_count_list.append(strand2_sims_mean_count)
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues.append(q_value)
if ((len(x_axis_labels) > 0) and types_strand1_real_count_list and types_strand2_real_count_list and types_strand1_sims_mean_count_list and types_strand2_sims_mean_count_list and (len(types_strand1_versus_strand2_FDR_BH_adjusted_pvalues)>0)):
if (types_strand1_real_count_list and types_strand2_real_count_list):
plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
None,
isKeySample,
None,
N,
x_axis_labels,
types_strand1_real_count_list,
types_strand2_real_count_list,
types_strand1_sims_mean_count_list,
types_strand2_sims_mean_count_list,
types_strand1_versus_strand2_FDR_BH_adjusted_pvalues,
strand1,strand2,
mutationsOrSignatures,
color1, color2,
figureName,
width,
plot_mode)
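# Per-SBS-signature bar plots across the six mutation types for each available strand bias comparison.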
if not sbs_df.empty:
if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_transcribed_versus_untranscribed_df,
width,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcriptionStrands,
'royalblue',
'yellowgreen',
'All Mutations',
'mutationtypes_transcription_strand_bias',
plot_mode)
if GENIC_VERSUS_INTERGENIC in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_genic_versus_intergenic_df,
width,
GENIC_VERSUS_INTERGENIC,
genicVersusIntergenicStrands,
'cyan',
'gray',
'All Mutations',
'mutationtypes_genic_versus_intergenic_strand_bias',
plot_mode)
if LAGGING_VERSUS_LEADING in strand_bias_list:
plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
sbs_df,
isKeySample,
six_mutation_types,
signature_lagging_versus_leading_df,
width,
LAGGING_VERSUS_LEADING,
replicationStrands,
'indianred',
'goldenrod',
'All Mutations',
'mutationtypes_replication_strand_bias',
plot_mode)
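# Combined circle and bar plots per SBS signature, only when both transcription and replication strand bias results are available.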
if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):
sbs_signatures = sbs_df['signature'].unique()
for sbs_signature in sbs_signatures:
plot_circle_bar_plots_together(outputDir,
jobname,
sbs_signature,
six_mutation_types,
signature2mutation_type2strand2percentagedict,
signature_genic_versus_intergenic_df,
signature_transcribed_versus_untranscribed_df,
signature_lagging_versus_leading_df,
genicVersusIntergenicStrands,
transcriptionStrands,
replicationStrands)
def plot_dbs_and_id_signatures_circle_figures(signature_type,
signatures,
strand_bias,
strandbias_figures_outputDir,
SIGNIFICANCE_LEVEL,
type2strand2percentagedict,
percentage_strings):
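# Circle figure for DBS/ID signatures: one row per signature with a significant strand, one column per percentage-difference threshold; circle color encodes the favored strand.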
rows_signatures = []
if strand_bias == LAGGING_VERSUS_LEADING:
strands = replicationStrands
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = transcriptionStrands
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = genicVersusIntergenicStrands
for signature in signatures:
if signature in type2strand2percentagedict:
for strand in strands:
if strand in type2strand2percentagedict[signature]:
for percentage_string in percentage_strings:
if percentage_string in type2strand2percentagedict[signature][strand]:
print('signature:%s strand:%s percentage_string:%s' %(signature,strand,percentage_string))
if signature not in rows_signatures:
rows_signatures.append(signature)
rows_signatures = sorted(rows_signatures, key=natural_key, reverse=True)
if len(rows_signatures) > 0:
fig, ax = plt.subplots(figsize=(5+1.5*len(percentage_strings), 10+1.5*len(rows_signatures)))
ax.set_aspect(1.0)
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for row_signature_index, row_signature in enumerate(rows_signatures):
if (strand_bias==LAGGING_VERSUS_LEADING):
if row_signature in type2strand2percentagedict:
lagging_percentage=None
leading_percentage=None
if LAGGING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LAGGING][percentage_string]==1:
lagging_percentage = 100
if LEADING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LEADING][percentage_string]==1:
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='indianred', fill=True)
ax.add_artist(circle)
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='goldenrod', fill=True)
ax.add_artist(circle)
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging>radius_leading):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='indianred', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)
ax.add_artist(circle)
else:
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='indianred', fill=True)
ax.add_artist(circle)
elif (strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if row_signature in type2strand2percentagedict:
transcribed_percentage=None
untranscribed_percentage=None
if TRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][TRANSCRIBED_STRAND][percentage_string]==1:
transcribed_percentage = 100
if UNTRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][UNTRANSCRIBED_STRAND][percentage_string]==1:
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='royalblue', fill=True)
ax.add_artist(circle)
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='yellowgreen', fill=True)
ax.add_artist(circle)
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed>radius_untranscribed):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)
ax.add_artist(circle)
else:
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)
ax.add_artist(circle)
elif (strand_bias==GENIC_VERSUS_INTERGENIC):
if row_signature in type2strand2percentagedict:
genic_percentage=None
intergenic_percentage=None
if GENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][GENIC][percentage_string]==1:
genic_percentage = 100
if INTERGENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][INTERGENIC][percentage_string]==1:
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='cyan', fill=True)
ax.add_artist(circle)
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='gray', fill=True)
ax.add_artist(circle)
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic>radius_intergenic):
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)
ax.add_artist(circle)
else:
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)
ax.add_artist(circle)
circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)
ax.add_artist(circle)
ax.set_xlim([0,len(percentage_strings)])
ax.set_xticklabels([])
ax.tick_params(axis='x', which='minor', length=0, labelsize=20)
ax.set_xticks(np.arange(0, len(percentage_strings), 1))
ax.set_xticks(np.arange(0, len(percentage_strings), 1)+0.5,minor=True)
ax.set_xticklabels(percentage_strings,minor=True)
if strand_bias == LAGGING_VERSUS_LEADING:
fig.suptitle('Lagging versus Leading Strand Bias', fontsize=30)
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
fig.suptitle('Transcribed versus Untranscribed Strand Bias', fontsize=30)
elif strand_bias == GENIC_VERSUS_INTERGENIC:
fig.suptitle('Genic versus Intergenic Strand Bias', fontsize=30)
ax.xaxis.set_ticks_position('top')
plt.tick_params(
axis='x', which='major', bottom=False, top=False)
ax.set_ylim([0,len(rows_signatures)])
ax.set_yticklabels([])
ax.tick_params(axis='y', which='minor', length=0, labelsize=30)
ax.set_yticks(np.arange(0, len(rows_signatures), 1))
ax.set_yticks(np.arange(0, len(rows_signatures), 1)+0.5,minor=True)
ax.set_yticklabels(rows_signatures, minor=True)
plt.tick_params(
axis='y', which='major', left=False)
ax.grid(which='major', color='black')
filename = '%s_Signatures_%s_with_circles_%s.png' % (signature_type, strand_bias, str(SIGNIFICANCE_LEVEL).replace('.', '_'))
figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)
fig.savefig(figFile)
fig.tight_layout()
plt.cla()
plt.close(fig)
def plot_legend(strandbias_figures_outputDir):
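# Save a standalone legend (one colored marker per strand) for each strand bias comparison.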
strand_biases=[TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]
for strandbias in strand_biases:
fig = plt.figure(figsize=(4,1), dpi=300)
ax = plt.gca()
plt.axis('off')
if strandbias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=20),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=20)]
elif strandbias == GENIC_VERSUS_INTERGENIC:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=20),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=20)]
elif (strandbias==LAGGING_VERSUS_LEADING):
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=20),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=20)]
ax.legend(handles=legend_elements, bbox_to_anchor=(0, 0.5), loc='center left', fontsize=20)
filename = 'Legend_%s.png' % (strandbias)
figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)
fig.savefig(figFile)
fig.tight_layout()
plt.cla()
plt.close(fig)
def plot_six_mutations_sbs_signatures_circle_figures(sbs_signatures,
strand_bias,
strandbias_figures_outputDir,
significance_level,
signature2mutation_type2strand2percentagedict,
percentage_strings):
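# Circle figure for SBS signatures: rows are signatures, columns are the six mutation types crossed with the percentage-difference thresholds; circle color encodes the favored strand.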
mutation_types = six_mutation_types
if strand_bias == LAGGING_VERSUS_LEADING:
strands = replicationStrands
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = transcriptionStrands
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = genicVersusIntergenicStrands
rows_sbs_signatures=[]
for signature in sbs_signatures:
if signature in signature2mutation_type2strand2percentagedict:
for mutation_type in signature2mutation_type2strand2percentagedict[signature]:
for strand in strands:
if strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:
for percentage_string in percentage_strings:
if (percentage_string in signature2mutation_type2strand2percentagedict[signature][mutation_type][strand]) and (signature2mutation_type2strand2percentagedict[signature][mutation_type][strand][percentage_string]==1):
if signature not in rows_sbs_signatures:
rows_sbs_signatures.append(signature)
rows_sbs_signatures = sorted(rows_sbs_signatures, key=natural_key, reverse=True)
xticklabels_list = percentage_strings * len(mutation_types)
if (len(rows_sbs_signatures)>0):
plot1, panel1 = plt.subplots(figsize=(5+1.5*len(xticklabels_list), 10+1.5*len(rows_sbs_signatures)))
plt.rc('axes', edgecolor='lightgray')
panel1.set_aspect(1.0)
if strand_bias == LAGGING_VERSUS_LEADING:
title = 'Lagging versus Leading Strand Bias'
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
title = 'Transcribed versus Untranscribed Strand Bias'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
title = 'Genic versus Intergenic Strand Bias'
panel1.text(len(percentage_strings)*3, len(rows_sbs_signatures)+2.5, title, horizontalalignment='center', fontsize=60, fontweight='bold', fontname='Arial')
colors = [[3 / 256, 189 / 256, 239 / 256],
[1 / 256, 1 / 256, 1 / 256],
[228 / 256, 41 / 256, 38 / 256],
[203 / 256, 202 / 256, 202 / 256],
[162 / 256, 207 / 256, 99 / 256],
[236 / 256, 199 / 256, 197 / 256]]
x = 0
for i in range(0, len(mutation_types), 1):
panel1.text((x+(len(percentage_strings)/2)-0.75), len(rows_sbs_signatures)+1.5, mutation_types[i], fontsize=55, fontweight='bold', fontname='Arial')
panel1.add_patch(plt.Rectangle((x+.0415, len(rows_sbs_signatures)+0.75), len(percentage_strings)-(2*.0415), .5, facecolor=colors[i], clip_on=False))
panel1.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(rows_sbs_signatures), facecolor=colors[i], zorder=0, alpha=0.25,edgecolor='grey'))
x += len(percentage_strings)
panel1.set_xlim([0,len(mutation_types)*len(percentage_strings)])
panel1.set_xticklabels([])
panel1.tick_params(axis='x', which='minor', length=0, labelsize=35)
panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1))
panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1)+0.5,minor=True)
panel1.set_xticklabels(xticklabels_list,minor=True)
panel1.xaxis.set_label_position('top')
panel1.xaxis.set_ticks_position('top')
plt.tick_params(
axis='x', which='major', bottom=False, top=False)
panel1.set_ylim([0,len(rows_sbs_signatures)])
panel1.set_yticklabels([])
panel1.tick_params(axis='y', which='minor', length=0, labelsize=40)
panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1))
panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1)+0.5,minor=True)
panel1.set_yticklabels(rows_sbs_signatures, minor=True)
plt.tick_params(
axis='y', which='major', left=False)
panel1.grid(which='major', color='black', zorder=3)
if strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=40),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40)]
elif strand_bias == GENIC_VERSUS_INTERGENIC:
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=40),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=40)]
elif (strand_bias==LAGGING_VERSUS_LEADING):
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]
panel1.legend(handles=legend_elements,ncol=len(legend_elements), bbox_to_anchor=(1, -0.1),loc='upper right', fontsize=40)
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for mutation_type_index, mutation_type in enumerate(mutation_types):
for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):
if (strand_bias==LAGGING_VERSUS_LEADING):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
lagging_percentage = None
leading_percentage = None
if (LAGGING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LAGGING][percentage_string]==1):
lagging_percentage = 100
if (LEADING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LEADING][percentage_string]==1):
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='indianred', fill=True))
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='goldenrod', fill=True))
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging > radius_leading):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))
else:
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
genic_percentage = None
intergenic_percentage = None
if (GENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][GENIC][percentage_string]==1):
genic_percentage = 100
if (INTERGENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][INTERGENIC][percentage_string]==1):
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='cyan',fill=True))
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='gray',fill=True))
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic > radius_intergenic):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic,color='gray', fill=True))
else:
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic, color='gray', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))
elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if row_sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:
transcribed_percentage = None
untranscribed_percentage = None
if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][TRANSCRIBED_STRAND][percentage_string]==1):
transcribed_percentage = 100
if (UNTRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][UNTRANSCRIBED_STRAND][percentage_string]==1):
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='royalblue',fill=True))
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='yellowgreen',fill=True))
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed > radius_untranscribed):
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))
else:
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))
filename = 'SBS_Signatures_%s_with_circle_plot_%s.png' % (strand_bias, str(significance_level).replace('.', '_'))
figFile = os.path.join(strandbias_figures_outputDir,CIRCLE_PLOTS, filename)
plot1.savefig(figFile,bbox_inches='tight')
plot1.tight_layout()
plt.cla()
plt.close(plot1)
| true | true |
1c4a0071eef9fbd124ada34ed39dfb7abd9d10cb | 5,376 | py | Python | models/face_parsing/modules/bn.py | soumik12345/Barbershop | 971be31afca55499287e97a7034a59a66b871ba8 | [
"MIT"
] | null | null | null | models/face_parsing/modules/bn.py | soumik12345/Barbershop | 971be31afca55499287e97a7034a59a66b871ba8 | [
"MIT"
] | 2 | 2022-03-30T17:49:03.000Z | 2022-03-30T19:20:28.000Z | models/face_parsing/modules/bn.py | soumik12345/Barbershop | 971be31afca55499287e97a7034a59a66b871ba8 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as functional
try:
from queue import Queue
except ImportError:
from Queue import Queue
from .functions import *
class ABN(nn.Module):
"""Activated Batch Normalization
This gathers a `BatchNorm2d` and an activation function in a single module
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
activation="leaky_relu",
slope=0.01,
):
"""Creates an Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super(ABN, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
self.activation = activation
self.slope = slope
if self.affine:
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.running_mean, 0)
nn.init.constant_(self.running_var, 1)
if self.affine:
nn.init.constant_(self.weight, 1)
nn.init.constant_(self.bias, 0)
def forward(self, x):
x = functional.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training,
self.momentum,
self.eps,
)
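# Apply the configured activation in place; any value other than relu/leaky_relu/elu returns the normalized tensor unchanged.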
if self.activation == ACT_RELU:
return functional.relu(x, inplace=True)
elif self.activation == ACT_LEAKY_RELU:
return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
elif self.activation == ACT_ELU:
return functional.elu(x, inplace=True)
else:
return x
def __repr__(self):
rep = (
"{name}({num_features}, eps={eps}, momentum={momentum},"
" affine={affine}, activation={activation}"
)
if self.activation == "leaky_relu":
rep += ", slope={slope})"
else:
rep += ")"
return rep.format(name=self.__class__.__name__, **self.__dict__)
class InPlaceABN(ABN):
"""InPlace Activated Batch Normalization"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
activation="leaky_relu",
slope=0.01,
):
"""Creates an InPlace Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super(InPlaceABN, self).__init__(
num_features, eps, momentum, affine, activation, slope
)
def forward(self, x):
return inplace_abn(
x,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.training,
self.momentum,
self.eps,
self.activation,
self.slope,
)
class InPlaceABNSync(ABN):
"""InPlace Activated Batch Normalization with cross-GPU synchronization
This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DistributedDataParallel`.
"""
def forward(self, x):
return inplace_abn_sync(
x,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.training,
self.momentum,
self.eps,
self.activation,
self.slope,
)
def __repr__(self):
rep = (
"{name}({num_features}, eps={eps}, momentum={momentum},"
" affine={affine}, activation={activation}"
)
if self.activation == "leaky_relu":
rep += ", slope={slope})"
else:
rep += ")"
return rep.format(name=self.__class__.__name__, **self.__dict__)
| 30.03352 | 116 | 0.570499 | import torch
import torch.nn as nn
import torch.nn.functional as functional
try:
from queue import Queue
except ImportError:
from Queue import Queue
from .functions import *
class ABN(nn.Module):
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
activation="leaky_relu",
slope=0.01,
):
super(ABN, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
self.activation = activation
self.slope = slope
if self.affine:
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.running_mean, 0)
nn.init.constant_(self.running_var, 1)
if self.affine:
nn.init.constant_(self.weight, 1)
nn.init.constant_(self.bias, 0)
def forward(self, x):
x = functional.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training,
self.momentum,
self.eps,
)
if self.activation == ACT_RELU:
return functional.relu(x, inplace=True)
elif self.activation == ACT_LEAKY_RELU:
return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
elif self.activation == ACT_ELU:
return functional.elu(x, inplace=True)
else:
return x
def __repr__(self):
rep = (
"{name}({num_features}, eps={eps}, momentum={momentum},"
" affine={affine}, activation={activation}"
)
if self.activation == "leaky_relu":
rep += ", slope={slope})"
else:
rep += ")"
return rep.format(name=self.__class__.__name__, **self.__dict__)
class InPlaceABN(ABN):
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
activation="leaky_relu",
slope=0.01,
):
super(InPlaceABN, self).__init__(
num_features, eps, momentum, affine, activation, slope
)
def forward(self, x):
return inplace_abn(
x,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.training,
self.momentum,
self.eps,
self.activation,
self.slope,
)
class InPlaceABNSync(ABN):
def forward(self, x):
return inplace_abn_sync(
x,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.training,
self.momentum,
self.eps,
self.activation,
self.slope,
)
def __repr__(self):
rep = (
"{name}({num_features}, eps={eps}, momentum={momentum},"
" affine={affine}, activation={activation}"
)
if self.activation == "leaky_relu":
rep += ", slope={slope})"
else:
rep += ")"
return rep.format(name=self.__class__.__name__, **self.__dict__)
| true | true |
1c4a00a6ef4dd2fe7cdd0c9ca505b8fd1ccc0977 | 14,574 | py | Python | QEBATangentAttack/utils.py | machanic/TangentAttack | 17c1a8e93f9bbd03e209e8650631af744a0ff6b8 | [
"Apache-2.0"
] | 4 | 2021-11-12T04:06:32.000Z | 2022-01-27T09:01:41.000Z | QEBATangentAttack/utils.py | machanic/TangentAttack | 17c1a8e93f9bbd03e209e8650631af744a0ff6b8 | [
"Apache-2.0"
] | 1 | 2022-02-22T14:00:59.000Z | 2022-02-25T08:57:29.000Z | QEBATangentAttack/utils.py | machanic/TangentAttack | 17c1a8e93f9bbd03e209e8650631af744a0ff6b8 | [
"Apache-2.0"
] | null | null | null | """
Provides classes to measure the distance between inputs.
Distances
---------
.. autosummary::
:nosignatures:
MeanSquaredDistance
MeanAbsoluteDistance
Linfinity
L0
Aliases
-------
.. autosummary::
:nosignatures:
MSE
MAE
Linf
Base class
----------
To implement a new distance, simply subclass the :class:`Distance` class and
implement the :meth:`_calculate` method.
.. autosummary::
:nosignatures:
Distance
"""
from __future__ import division
import sys
import abc
import torch
abstractmethod = abc.abstractmethod
if sys.version_info >= (3, 4):
ABC = abc.ABC
else: # pragma: no cover
ABC = abc.ABCMeta('ABC', (), {})
import functools
from numbers import Number
from torch.nn import functional as F
import numpy as np
@functools.total_ordering
class Distance(ABC):
"""Base class for distances.
This class should be subclassed when implementing
new distances. Subclasses must implement _calculate.
"""
def __init__(
self,
reference=None,
other=None,
bounds=None,
value=None):
if value is not None:
# alternative constructor
assert isinstance(value, Number)
assert reference is None
assert other is None
assert bounds is None
self.reference = None
self.other = None
self._bounds = None
self._value = value
self._gradient = None
else:
# standard constructor
self.reference = reference
self.other = other
self._bounds = bounds
self._value, self._gradient = self._calculate()
assert self._value is not None
@property
def value(self):
return self._value
@property
def gradient(self):
return self._gradient
@abstractmethod
def _calculate(self):
"""Returns distance and gradient of distance w.r.t. to self.other"""
raise NotImplementedError
def name(self):
return self.__class__.__name__
def __str__(self):
return '{} = {:.6e}'.format(self.name(), self._value)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if other.__class__ != self.__class__:
raise TypeError('Comparisons are only possible between the same distance types.')
return self.value == other.value
def __lt__(self, other):
if other.__class__ != self.__class__:
raise TypeError('Comparisons are only possible between the same distance types.')
return self.value < other.value
class MeanSquaredDistance(Distance):
"""Calculates the mean squared error between two inputs.
"""
def _calculate(self):
min_, max_ = self._bounds
n = self.reference.numel()
f = n * (max_ - min_)**2
diff = self.other - self.reference
value = torch.dot(diff.view(-1), diff.view(-1)).item() / f
# calculate the gradient only when needed
self._g_diff = diff
self._g_f = f
gradient = None
return value, gradient
@property
def gradient(self):
if self._gradient is None:
self._gradient = self._g_diff / (self._g_f / 2)
return self._gradient
def __str__(self):
return 'normalized MSE = {:.2e}'.format(self._value)
MSE = MeanSquaredDistance
class MeanAbsoluteDistance(Distance):
"""Calculates the mean absolute error between two inputs.
"""
def _calculate(self):
min_, max_ = self._bounds
diff = (self.other - self.reference) / (max_ - min_)
value = torch.mean(torch.abs(diff)).type(torch.float64)
        n = self.reference.numel()
gradient = 1 / n * torch.sign(diff) / (max_ - min_)
return value, gradient
def __str__(self):
return 'normalized MAE = {:.2e}'.format(self._value)
MAE = MeanAbsoluteDistance
class Linfinity(Distance):
"""Calculates the L-infinity norm of the difference between two inputs.
"""
def _calculate(self):
min_, max_ = self._bounds
diff = (self.other - self.reference) / (max_ - min_)
value = torch.max(torch.abs(diff)).type(torch.float64)
gradient = None
return value, gradient
@property
def gradient(self):
raise NotImplementedError
def __str__(self):
return 'normalized Linf distance = {:.2e}'.format(self._value)
Linf = Linfinity
class L0(Distance):
"""Calculates the L0 norm of the difference between two inputs.
"""
def _calculate(self):
diff = self.other - self.reference
value = torch.sum(diff != 0)
gradient = None
return value, gradient
@property
def gradient(self):
raise NotImplementedError
def __str__(self):
return 'L0 distance = {}'.format(self._value)
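# Illustrative sketch, not part of the original module: as the module docstring
# notes, a new distance only needs to subclass Distance and implement _calculate.
# The class below is an assumed example following the pattern of the distances
# above; it is not used elsewhere in this file.
class EuclideanDistance(Distance):
    """Example subclass: normalized L2 norm of the difference between two inputs."""
    def _calculate(self):
        min_, max_ = self._bounds
        # normalize the difference to the value range, then take the L2 norm
        diff = (self.other - self.reference) / (max_ - min_)
        value = torch.norm(diff.view(-1)).item()
        gradient = None  # gradient is omitted in this sketch
        return value, gradient
    def __str__(self):
        return 'normalized L2 distance = {:.2e}'.format(self._value)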
"""
Provides classes that define what is adversarial.
Criteria
--------
We provide criteria for untargeted and targeted adversarial attacks.
.. autosummary::
:nosignatures:
Misclassification
TopKMisclassification
OriginalClassProbability
ConfidentMisclassification
.. autosummary::
:nosignatures:
TargetClass
TargetClassProbability
Examples
--------
Untargeted criteria:
>>> from foolbox.criteria import Misclassification
>>> criterion1 = Misclassification()
>>> from foolbox.criteria import TopKMisclassification
>>> criterion2 = TopKMisclassification(k=5)
Targeted criteria:
>>> from foolbox.criteria import TargetClass
>>> criterion3 = TargetClass(22)
>>> from foolbox.criteria import TargetClassProbability
>>> criterion4 = TargetClassProbability(22, p=0.99)
Criteria can be combined to create a new criterion:
>>> criterion5 = criterion2 & criterion3
"""
class Criterion(ABC):
"""Base class for criteria that define what is adversarial.
The :class:`Criterion` class represents a criterion used to
determine if predictions for an image are adversarial given
a reference label. It should be subclassed when implementing
new criteria. Subclasses must implement is_adversarial.
"""
def name(self):
"""Returns a human readable name that uniquely identifies
the criterion with its hyperparameters.
Returns
-------
str
Human readable name that uniquely identifies the criterion
with its hyperparameters.
Notes
-----
Defaults to the class name but subclasses can provide more
descriptive names and must take hyperparameters into account.
"""
return self.__class__.__name__
@abstractmethod
def is_adversarial(self, predictions, label):
"""Decides if predictions for an image are adversarial given
a reference label.
Parameters
----------
predictions : :class:`numpy.ndarray`
A vector with the pre-softmax predictions for some image.
label : int
The label of the unperturbed reference image.
Returns
-------
bool
True if an image with the given predictions is an adversarial
example when the ground-truth class is given by label, False
otherwise.
"""
raise NotImplementedError
def __and__(self, other):
return CombinedCriteria(self, other)
class CombinedCriteria(Criterion):
"""Meta criterion that combines several criteria into a new one.
Considers inputs as adversarial that are considered adversarial
by all sub-criteria that are combined by this criterion.
Instead of using this class directly, it is possible to combine
criteria like this: criteria1 & criteria2
Parameters
----------
*criteria : variable length list of :class:`Criterion` instances
List of sub-criteria that will be combined.
Notes
-----
This class uses lazy evaluation of the criteria in the order they
are passed to the constructor.
"""
def __init__(self, *criteria):
super(CombinedCriteria, self).__init__()
self._criteria = criteria
def name(self):
"""Concatenates the names of the given criteria in alphabetical order.
If a sub-criterion is itself a combined criterion, its name is
first split into the individual names and the names of the
sub-sub criteria is used instead of the name of the sub-criterion.
This is done recursively to ensure that the order and the hierarchy
of the criteria does not influence the name.
Returns
-------
str
The alphabetically sorted names of the sub-criteria concatenated
using double underscores between them.
"""
names = (criterion.name() for criterion in self._criteria)
return '__'.join(sorted(names))
def is_adversarial(self, predictions, label):
for criterion in self._criteria:
if not criterion.is_adversarial(predictions, label):
# lazy evaluation
return False
return True
class Misclassification(Criterion):
"""Defines adversarials as inputs for which the predicted class
is not the original class.
See Also
--------
:class:`TopKMisclassification`
Notes
-----
    Uses `torch.argmax` to break ties.
"""
def name(self):
return 'Top1Misclassification'
def is_adversarial(self, predictions, label):
top1 = torch.argmax(predictions).item()
return top1 != label
class ConfidentMisclassification(Criterion):
"""Defines adversarials as inputs for which the probability
of any class other than the original is above a given threshold.
Parameters
----------
p : float
The threshold probability. If the probability of any class
other than the original is at least p, the image is
considered an adversarial. It must satisfy 0 <= p <= 1.
"""
def __init__(self, p):
super(ConfidentMisclassification, self).__init__()
assert 0 <= p <= 1
self.p = p
def name(self):
return '{}-{:.04f}'.format(self.__class__.__name__, self.p)
def is_adversarial(self, predictions, label):
top1 = torch.argmax(predictions)
probabilities = F.softmax(predictions)
return (torch.max(probabilities) >= self.p) and (top1 != label)
class TopKMisclassification(Criterion):
"""Defines adversarials as inputs for which the original class is
not one of the top k predicted classes.
For k = 1, the :class:`Misclassification` class provides a more
efficient implementation.
Parameters
----------
k : int
Number of top predictions to which the reference label is
compared to.
See Also
--------
    :class:`Misclassification` : Provides a more efficient implementation
for k = 1.
Notes
-----
    Uses `torch.argsort` to break ties.
"""
def __init__(self, k):
super(TopKMisclassification, self).__init__()
self.k = k
def name(self):
return 'Top{}Misclassification'.format(self.k)
def is_adversarial(self, predictions, label):
topk = torch.argsort(predictions)[-self.k:]
return label not in topk
class TargetClass(Criterion):
"""Defines adversarials as inputs for which the predicted class
is the given target class.
Parameters
----------
target_class : int
The target class that needs to be predicted for an image
to be considered an adversarial.
Notes
-----
    Uses `torch.argmax` to break ties.
"""
def __init__(self, target_class=None):
super(TargetClass, self).__init__()
self._target_class = target_class
def target_class(self):
return self._target_class
def name(self):
return '{}-{}'.format(self.__class__.__name__, self.target_class())
def is_adversarial(self, predictions, label=None):
top1 = torch.argmax(predictions,dim=-1).item()
        return top1 == self.target_class()  # the target class here is actually the true label
class OriginalClassProbability(Criterion):
"""Defines adversarials as inputs for which the probability
of the original class is below a given threshold.
This criterion alone does not guarantee that the class
predicted for the adversarial image is not the original class
(unless p < 1 / number of classes). Therefore, it should usually
    be combined with a classification criterion.
Parameters
----------
p : float
The threshold probability. If the probability of the
original class is below this threshold, the image is
considered an adversarial. It must satisfy 0 <= p <= 1.
"""
def __init__(self, p):
super(OriginalClassProbability, self).__init__()
assert 0 <= p <= 1
self.p = p
def name(self):
return '{}-{:.04f}'.format(self.__class__.__name__, self.p)
def is_adversarial(self, predictions, label):
probabilities = F.softmax(predictions)
return probabilities[label] < self.p
class TargetClassProbability(Criterion):
"""Defines adversarials as inputs for which the probability
of a given target class is above a given threshold.
If the threshold is below 0.5, this criterion does not guarantee
that the class predicted for the adversarial image is not the
original class. In that case, it should usually be combined with
a classification criterion.
Parameters
----------
target_class : int
The target class for which the predicted probability must
be above the threshold probability p, otherwise the image
is not considered an adversarial.
p : float
The threshold probability. If the probability of the
target class is above this threshold, the image is
considered an adversarial. It must satisfy 0 <= p <= 1.
"""
def __init__(self, target_class, p):
super(TargetClassProbability, self).__init__()
self._target_class = target_class
assert 0 <= p <= 1
self.p = p
def target_class(self):
return self._target_class
def name(self):
return '{}-{}-{:.04f}'.format(
self.__class__.__name__, self.target_class(), self.p)
def is_adversarial(self, predictions, label):
        probabilities = F.softmax(predictions)
return probabilities[self.target_class()] > self.p
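# Illustrative usage sketch, not part of the original module: the helper below is
# an assumed example showing how criteria compose with `&` (see CombinedCriteria);
# the k/p values and the random logits are made up for illustration only.
def _example_combined_criterion():
    # adversarial only if the true class drops out of the top-5 *and* its
    # softmax probability falls below 0.1 (sub-criteria are evaluated lazily)
    criterion = TopKMisclassification(k=5) & OriginalClassProbability(p=0.1)
    logits = torch.randn(10)  # pre-softmax predictions for one image
    return criterion.is_adversarial(logits, label=3)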
| 25.97861 | 93 | 0.645327 | from __future__ import division
import sys
import abc
import torch
abstractmethod = abc.abstractmethod
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})
import functools
from numbers import Number
from torch.nn import functional as F
import numpy as np
@functools.total_ordering
class Distance(ABC):
def __init__(
self,
reference=None,
other=None,
bounds=None,
value=None):
if value is not None:
assert isinstance(value, Number)
assert reference is None
assert other is None
assert bounds is None
self.reference = None
self.other = None
self._bounds = None
self._value = value
self._gradient = None
else:
self.reference = reference
self.other = other
self._bounds = bounds
self._value, self._gradient = self._calculate()
assert self._value is not None
@property
def value(self):
return self._value
@property
def gradient(self):
return self._gradient
@abstractmethod
def _calculate(self):
raise NotImplementedError
def name(self):
return self.__class__.__name__
def __str__(self):
return '{} = {:.6e}'.format(self.name(), self._value)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if other.__class__ != self.__class__:
raise TypeError('Comparisons are only possible between the same distance types.')
return self.value == other.value
def __lt__(self, other):
if other.__class__ != self.__class__:
raise TypeError('Comparisons are only possible between the same distance types.')
return self.value < other.value
class MeanSquaredDistance(Distance):
def _calculate(self):
min_, max_ = self._bounds
n = self.reference.numel()
f = n * (max_ - min_)**2
diff = self.other - self.reference
value = torch.dot(diff.view(-1), diff.view(-1)).item() / f
self._g_diff = diff
self._g_f = f
gradient = None
return value, gradient
@property
def gradient(self):
if self._gradient is None:
self._gradient = self._g_diff / (self._g_f / 2)
return self._gradient
def __str__(self):
return 'normalized MSE = {:.2e}'.format(self._value)
MSE = MeanSquaredDistance
class MeanAbsoluteDistance(Distance):
def _calculate(self):
min_, max_ = self._bounds
diff = (self.other - self.reference) / (max_ - min_)
value = torch.mean(torch.abs(diff)).type(torch.float64)
        n = self.reference.numel()
gradient = 1 / n * torch.sign(diff) / (max_ - min_)
return value, gradient
def __str__(self):
return 'normalized MAE = {:.2e}'.format(self._value)
MAE = MeanAbsoluteDistance
class Linfinity(Distance):
def _calculate(self):
min_, max_ = self._bounds
diff = (self.other - self.reference) / (max_ - min_)
value = torch.max(torch.abs(diff)).type(torch.float64)
gradient = None
return value, gradient
@property
def gradient(self):
raise NotImplementedError
def __str__(self):
return 'normalized Linf distance = {:.2e}'.format(self._value)
Linf = Linfinity
class L0(Distance):
def _calculate(self):
diff = self.other - self.reference
value = torch.sum(diff != 0)
gradient = None
return value, gradient
@property
def gradient(self):
raise NotImplementedError
def __str__(self):
return 'L0 distance = {}'.format(self._value)
class Criterion(ABC):
def name(self):
return self.__class__.__name__
@abstractmethod
def is_adversarial(self, predictions, label):
raise NotImplementedError
def __and__(self, other):
return CombinedCriteria(self, other)
class CombinedCriteria(Criterion):
def __init__(self, *criteria):
super(CombinedCriteria, self).__init__()
self._criteria = criteria
def name(self):
names = (criterion.name() for criterion in self._criteria)
return '__'.join(sorted(names))
def is_adversarial(self, predictions, label):
for criterion in self._criteria:
if not criterion.is_adversarial(predictions, label):
return False
return True
class Misclassification(Criterion):
def name(self):
return 'Top1Misclassification'
def is_adversarial(self, predictions, label):
top1 = torch.argmax(predictions).item()
return top1 != label
class ConfidentMisclassification(Criterion):
def __init__(self, p):
super(ConfidentMisclassification, self).__init__()
assert 0 <= p <= 1
self.p = p
def name(self):
return '{}-{:.04f}'.format(self.__class__.__name__, self.p)
def is_adversarial(self, predictions, label):
top1 = torch.argmax(predictions)
probabilities = F.softmax(predictions)
return (torch.max(probabilities) >= self.p) and (top1 != label)
class TopKMisclassification(Criterion):
def __init__(self, k):
super(TopKMisclassification, self).__init__()
self.k = k
def name(self):
return 'Top{}Misclassification'.format(self.k)
def is_adversarial(self, predictions, label):
topk = torch.argsort(predictions)[-self.k:]
return label not in topk
class TargetClass(Criterion):
def __init__(self, target_class=None):
super(TargetClass, self).__init__()
self._target_class = target_class
def target_class(self):
return self._target_class
def name(self):
return '{}-{}'.format(self.__class__.__name__, self.target_class())
def is_adversarial(self, predictions, label=None):
top1 = torch.argmax(predictions,dim=-1).item()
return top1 == self.target_class()
class OriginalClassProbability(Criterion):
def __init__(self, p):
super(OriginalClassProbability, self).__init__()
assert 0 <= p <= 1
self.p = p
def name(self):
return '{}-{:.04f}'.format(self.__class__.__name__, self.p)
def is_adversarial(self, predictions, label):
probabilities = F.softmax(predictions)
return probabilities[label] < self.p
class TargetClassProbability(Criterion):
def __init__(self, target_class, p):
super(TargetClassProbability, self).__init__()
self._target_class = target_class
assert 0 <= p <= 1
self.p = p
def target_class(self):
return self._target_class
def name(self):
return '{}-{}-{:.04f}'.format(
self.__class__.__name__, self.target_class(), self.p)
def is_adversarial(self, predictions, label):
        probabilities = F.softmax(predictions)
return probabilities[self.target_class()] > self.p
| true | true |
1c4a01f57bcc1a7f20369f01c8316e7174a4aa93 | 2,789 | py | Python | src/data_upload/batch.py | yourtrading-ai/py_yourtrading_ai | b69424f2afc40fe258c7ddae2fb47acc383ecbe5 | [
"MIT"
] | null | null | null | src/data_upload/batch.py | yourtrading-ai/py_yourtrading_ai | b69424f2afc40fe258c7ddae2fb47acc383ecbe5 | [
"MIT"
] | null | null | null | src/data_upload/batch.py | yourtrading-ai/py_yourtrading_ai | b69424f2afc40fe258c7ddae2fb47acc383ecbe5 | [
"MIT"
] | null | null | null | import asyncio
import io
import ssl
import aiohttp
import aleph_client.asynchronous
import certifi
import pandas as pd
from data_upload.data_utils import clean_time_duplicates
def get_download_url(symbol, interval="hourly"):
if interval == "daily":
interval = "d"
elif interval == "hourly":
interval = "1h"
elif interval == "minutely":
interval = "minute"
return f"https://www.cryptodatadownload.com/cdd/Binance_{symbol}USDT_{interval}.csv"
# Code for all async
# responses = asyncio.get_event_loop().run_until_complete(post_all_to_aleph_async(currencies))
# hashes = [resp['item_hash'] for resp in responses]
async def post_to_aleph_async(account, client, symbol, interval="hourly"):
url = get_download_url(symbol, interval)
sslcontext = ssl.create_default_context(cafile=certifi.where())
async with client.get(url, ssl=sslcontext) as response:
with io.StringIO(await response.text()) as text_io:
df = pd.read_csv(text_io, header=1)
clean_time_duplicates(df)
print(df.describe())
return await aleph_client.asynchronous.create_post(account=account,
post_content=df.to_dict(),
post_type="ohlcv_timeseries",
channel="TEST-CRYPTODATADOWNLOAD")
async def post_all_to_aleph_async(account, symbols: list, interval="hourly"):
async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(limit_per_host=4)) as client:
futures = [post_to_aleph_async(account, client, symbol, interval) for symbol in symbols]
return await asyncio.gather(*futures)
def post_to_aleph(account, url, amend_hash=None):
df = pd.read_csv(url, header=1)
print(df.describe())
post_type = 'ohlcv_timeseries' if amend_hash is None else 'amend'
return aleph_client.create_post(account=account,
post_content=df.describe().to_dict(),
post_type=post_type,
channel="TEST-CRYPTODATADOWNLOAD",
ref=amend_hash)
def post_all_to_aleph(account, symbols: list, amend_hashes=None, interval="hourly"):
hashes = {}
for symbol in symbols:
url = get_download_url(symbol, interval)
if amend_hashes:
resp = post_to_aleph(account, url, amend_hashes[symbol])
print(f"Amended {symbol}: {amend_hashes[symbol]}")
else:
resp = post_to_aleph(account, url)
print(f"Posted {symbol}: {resp['item_hash']}")
hashes[symbol] = resp['item_hash']
return hashes
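# Example usage for the synchronous path (assumed symbols list and account object,
# mirroring the commented async example above):
# account = ...  # aleph_client account
# hashes = post_all_to_aleph(account, ["BTC", "ETH"])
# post_all_to_aleph(account, ["BTC", "ETH"], amend_hashes=hashes)  # amend on later runs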
| 39.842857 | 113 | 0.627106 | import asyncio
import io
import ssl
import aiohttp
import aleph_client.asynchronous
import certifi
import pandas as pd
from data_upload.data_utils import clean_time_duplicates
def get_download_url(symbol, interval="hourly"):
if interval == "daily":
interval = "d"
elif interval == "hourly":
interval = "1h"
elif interval == "minutely":
interval = "minute"
return f"https://www.cryptodatadownload.com/cdd/Binance_{symbol}USDT_{interval}.csv"
async def post_to_aleph_async(account, client, symbol, interval="hourly"):
url = get_download_url(symbol, interval)
sslcontext = ssl.create_default_context(cafile=certifi.where())
async with client.get(url, ssl=sslcontext) as response:
with io.StringIO(await response.text()) as text_io:
df = pd.read_csv(text_io, header=1)
clean_time_duplicates(df)
print(df.describe())
return await aleph_client.asynchronous.create_post(account=account,
post_content=df.to_dict(),
post_type="ohlcv_timeseries",
channel="TEST-CRYPTODATADOWNLOAD")
async def post_all_to_aleph_async(account, symbols: list, interval="hourly"):
async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(limit_per_host=4)) as client:
futures = [post_to_aleph_async(account, client, symbol, interval) for symbol in symbols]
return await asyncio.gather(*futures)
def post_to_aleph(account, url, amend_hash=None):
df = pd.read_csv(url, header=1)
print(df.describe())
post_type = 'ohlcv_timeseries' if amend_hash is None else 'amend'
return aleph_client.create_post(account=account,
post_content=df.describe().to_dict(),
post_type=post_type,
channel="TEST-CRYPTODATADOWNLOAD",
ref=amend_hash)
def post_all_to_aleph(account, symbols: list, amend_hashes=None, interval="hourly"):
hashes = {}
for symbol in symbols:
url = get_download_url(symbol, interval)
if amend_hashes:
resp = post_to_aleph(account, url, amend_hashes[symbol])
print(f"Amended {symbol}: {amend_hashes[symbol]}")
else:
resp = post_to_aleph(account, url)
print(f"Posted {symbol}: {resp['item_hash']}")
hashes[symbol] = resp['item_hash']
return hashes
| true | true |
1c4a02071d1bd4dc5a2bf4caa5a4ce0f0c07ce3f | 3,426 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
class VirtualSysctlDetectionMixin(object):
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
def detect_virt_product(self, key):
virtual_product_facts = {}
self.detect_sysctl()
# FIXME: exit early on falsey self.sysctl_path and unindent
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
virtual_product_facts['virtualization_type'] = 'kvm'
virtual_product_facts['virtualization_role'] = 'guest'
elif re.match('.*VMware.*', out):
virtual_product_facts['virtualization_type'] = 'VMware'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'VirtualBox':
virtual_product_facts['virtualization_type'] = 'virtualbox'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'HVM domU':
virtual_product_facts['virtualization_type'] = 'xen'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'Parallels':
virtual_product_facts['virtualization_type'] = 'parallels'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'RHEV Hypervisor':
virtual_product_facts['virtualization_type'] = 'RHEV'
virtual_product_facts['virtualization_role'] = 'guest'
elif (key == 'security.jail.jailed') and (out.rstrip() == '1'):
virtual_product_facts['virtualization_type'] = 'jails'
virtual_product_facts['virtualization_role'] = 'guest'
return virtual_product_facts
def detect_virt_vendor(self, key):
virtual_vendor_facts = {}
self.detect_sysctl()
# FIXME: exit early on falsey self.sysctl_path and unindent
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
virtual_vendor_facts['virtualization_type'] = 'kvm'
virtual_vendor_facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
virtual_vendor_facts['virtualization_type'] = 'vmm'
virtual_vendor_facts['virtualization_role'] = 'guest'
return virtual_vendor_facts
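# Illustrative sketch, not part of the original module: the mixin assumes it is
# combined with a platform Virtual collector that provides self.module. The class
# name and sysctl keys below are assumed examples, not taken from this file.
#
#   class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
#       def get_virtual_facts(self):
#           facts = self.detect_virt_product('kern.vm_guest')
#           facts.update(self.detect_virt_vendor('hw.hv_vendor'))
#           return facts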
| 47.583333 | 88 | 0.620257 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
class VirtualSysctlDetectionMixin(object):
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
def detect_virt_product(self, key):
virtual_product_facts = {}
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
virtual_product_facts['virtualization_type'] = 'kvm'
virtual_product_facts['virtualization_role'] = 'guest'
elif re.match('.*VMware.*', out):
virtual_product_facts['virtualization_type'] = 'VMware'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'VirtualBox':
virtual_product_facts['virtualization_type'] = 'virtualbox'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'HVM domU':
virtual_product_facts['virtualization_type'] = 'xen'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'Parallels':
virtual_product_facts['virtualization_type'] = 'parallels'
virtual_product_facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'RHEV Hypervisor':
virtual_product_facts['virtualization_type'] = 'RHEV'
virtual_product_facts['virtualization_role'] = 'guest'
elif (key == 'security.jail.jailed') and (out.rstrip() == '1'):
virtual_product_facts['virtualization_type'] = 'jails'
virtual_product_facts['virtualization_role'] = 'guest'
return virtual_product_facts
def detect_virt_vendor(self, key):
virtual_vendor_facts = {}
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
virtual_vendor_facts['virtualization_type'] = 'kvm'
virtual_vendor_facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
virtual_vendor_facts['virtualization_type'] = 'vmm'
virtual_vendor_facts['virtualization_role'] = 'guest'
return virtual_vendor_facts
| true | true |
1c4a02c8305edf5419beb0b9ec01a9f4757b6f61 | 728 | py | Python | app/test/test4.py | saint816/fishbook | 80a4b563a05086c85eb347286d28bb0e6258ff1c | [
"MIT"
] | null | null | null | app/test/test4.py | saint816/fishbook | 80a4b563a05086c85eb347286d28bb0e6258ff1c | [
"MIT"
] | null | null | null | app/test/test4.py | saint816/fishbook | 80a4b563a05086c85eb347286d28bb0e6258ff1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test4
Description : LocalStack线程隔离特性
Author : pengsheng
date: 2019-04-21
-------------------------------------------------
"""
import threading
import time
from werkzeug.local import LocalStack
my_stack = LocalStack()
my_stack.push(1)
print('main after push: ' + str(my_stack.top))
def worker():
print('child thread before push: ' + str(my_stack.top))
my_stack.push(2)
print('child thread after push: ' + str(my_stack.top))
child_thread = threading.Thread(target=worker, name='child_thread')
child_thread.start()
time.sleep(1)
print('finally value at main thread: ' + str(my_stack.top))
| 23.483871 | 67 | 0.582418 | import threading
import time
from werkzeug.local import LocalStack
my_stack = LocalStack()
my_stack.push(1)
print('main after push: ' + str(my_stack.top))
def worker():
print('child thread before push: ' + str(my_stack.top))
my_stack.push(2)
print('child thread after push: ' + str(my_stack.top))
child_thread = threading.Thread(target=worker, name='child_thread')
child_thread.start()
time.sleep(1)
print('finally value at main thread: ' + str(my_stack.top))
| true | true |
1c4a03d70ff26f631a6d41a2c5e4ca7dcb12136c | 3,778 | py | Python | generate_cloth_img.py | otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer | bbda81dc7b52f42e07e9daaff38ce7453b24e008 | [
"MIT"
] | null | null | null | generate_cloth_img.py | otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer | bbda81dc7b52f42e07e9daaff38ce7453b24e008 | [
"MIT"
] | null | null | null | generate_cloth_img.py | otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer | bbda81dc7b52f42e07e9daaff38ce7453b24e008 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 21 08:51:18 2018
@author: user
"""
import argparse
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import chainer
from chainer import cuda
from chainer.datasets import get_cifar10
from chainer import dataset
from chainer import Variable
from chainer import serializers
import chainer.functions as F
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.io import imread
import network
# Load data
class LoadDataset(dataset.DatasetMixin):
def __init__(self, split, return_image=False):
assert split in ('train', 'val')
ids = self._get_ids()
iter_train, iter_val = train_test_split(
ids, test_size=0.2, random_state=np.random.RandomState(1234))
self.ids = iter_train if split == 'train' else iter_val
self._return_image = return_image
def __len__(self):
return len(self.ids)
def _get_ids(self):
ids = []
dataset_dir = chainer.dataset.get_dataset_directory(
'2019_11_28_pr2')
for data_id in os.listdir(dataset_dir):
ids.append(osp.join(dataset_dir , data_id))
return ids
def img_to_datum(self, img):
img = img.copy()
datum = img.astype(np.float32)
datum = datum[:, :, ::-1] #RGB -> BGR
datum = datum.transpose((2, 0, 1))
return datum
def get_example(self, i):
id = self.ids[i]
image_file = osp.join(id , "image.png")
img = imread(image_file)
datum = self.img_to_datum(img)
if self._return_image:
return img
else:
return datum, datum
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--model', '-m', type=str, default="./results/cloth/model")
parser.add_argument('--begin', '-b', type=int, default=0)
args = parser.parse_args()
# Set up a neural network to train.
test = LoadDataset(split='val')
model = network.CAE(3,3, return_out=True)
if args.model != None:
print( "loading model from " + args.model )
serializers.load_npz(args.model, model)
# Show 64 images
fig = plt.figure(figsize=(6,6))
plt.title("Original images: first rows,\n Predicted images: second rows")
plt.axis('off')
plt.tight_layout()
pbar = tqdm(total=8)
#import ipdb; ipdb.set_trace()
for i in range(2):
for j in range(2):
ax = fig.add_subplot(4, 2, i*4+j+1, xticks=[], yticks=[])
x, t = test[i*2+j]
xT = x.transpose(1, 2, 0)
xT = xT.astype(np.uint8)
ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest')
x = np.expand_dims(x, 0)
t = np.expand_dims(t, 0)
if args.gpu >= 0:
cuda.get_device_from_id(0).use()
model.to_gpu()
x = cuda.cupy.array(x)
t = cuda.cupy.array(t)
predicted, loss = model(Variable(x), Variable(t))
#print(predicted.shape)
#print(loss)
predicted = F.transpose(predicted[0], (1, 2, 0))
predicted = cuda.to_cpu(predicted.data) #Variable to numpy
predicted = predicted * 255
predicted = predicted.astype(np.uint8)
ax = fig.add_subplot(4, 2, i*4+j+3, xticks=[], yticks=[])
ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest')
pbar.update(1)
pbar.close()
plt.savefig("result.png")
plt.show()
plt.close()
if __name__ == '__main__':
main()
| 28.839695 | 83 | 0.588936 |
import argparse
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import chainer
from chainer import cuda
from chainer.datasets import get_cifar10
from chainer import dataset
from chainer import Variable
from chainer import serializers
import chainer.functions as F
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.io import imread
import network
class LoadDataset(dataset.DatasetMixin):
def __init__(self, split, return_image=False):
assert split in ('train', 'val')
ids = self._get_ids()
iter_train, iter_val = train_test_split(
ids, test_size=0.2, random_state=np.random.RandomState(1234))
self.ids = iter_train if split == 'train' else iter_val
self._return_image = return_image
def __len__(self):
return len(self.ids)
def _get_ids(self):
ids = []
dataset_dir = chainer.dataset.get_dataset_directory(
'2019_11_28_pr2')
for data_id in os.listdir(dataset_dir):
ids.append(osp.join(dataset_dir , data_id))
return ids
def img_to_datum(self, img):
img = img.copy()
datum = img.astype(np.float32)
        datum = datum[:, :, ::-1]
        datum = datum.transpose((2, 0, 1))
return datum
def get_example(self, i):
id = self.ids[i]
image_file = osp.join(id , "image.png")
img = imread(image_file)
datum = self.img_to_datum(img)
if self._return_image:
return img
else:
return datum, datum
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--model', '-m', type=str, default="./results/cloth/model")
parser.add_argument('--begin', '-b', type=int, default=0)
args = parser.parse_args()
test = LoadDataset(split='val')
model = network.CAE(3,3, return_out=True)
if args.model != None:
print( "loading model from " + args.model )
serializers.load_npz(args.model, model)
fig = plt.figure(figsize=(6,6))
plt.title("Original images: first rows,\n Predicted images: second rows")
plt.axis('off')
plt.tight_layout()
pbar = tqdm(total=8)
for i in range(2):
for j in range(2):
ax = fig.add_subplot(4, 2, i*4+j+1, xticks=[], yticks=[])
x, t = test[i*2+j]
xT = x.transpose(1, 2, 0)
xT = xT.astype(np.uint8)
ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest')
x = np.expand_dims(x, 0)
t = np.expand_dims(t, 0)
if args.gpu >= 0:
cuda.get_device_from_id(0).use()
model.to_gpu()
x = cuda.cupy.array(x)
t = cuda.cupy.array(t)
predicted, loss = model(Variable(x), Variable(t))
predicted = F.transpose(predicted[0], (1, 2, 0))
            predicted = cuda.to_cpu(predicted.data)
            predicted = predicted * 255
predicted = predicted.astype(np.uint8)
ax = fig.add_subplot(4, 2, i*4+j+3, xticks=[], yticks=[])
ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest')
pbar.update(1)
pbar.close()
plt.savefig("result.png")
plt.show()
plt.close()
if __name__ == '__main__':
main()
| true | true |
1c4a0522b17523bfaff0ea4d0aee5f56a95b355e | 496 | py | Python | bookmarks/models.py | justinborek/djorg | f6aa9cb23f0476c032ac5250045879962cc11072 | [
"MIT"
] | null | null | null | bookmarks/models.py | justinborek/djorg | f6aa9cb23f0476c032ac5250045879962cc11072 | [
"MIT"
] | null | null | null | bookmarks/models.py | justinborek/djorg | f6aa9cb23f0476c032ac5250045879962cc11072 | [
"MIT"
] | null | null | null | from uuid import uuid4
from datetime import datetime
from django.db import models
class Bookmark(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
url = models.URLField('URL', unique=True)
name = models.CharField(max_length=200)
notes = models.TextField(blank=True)
created_at = models.DateTimeField(default=datetime.now, blank=True)
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
| 33.066667 | 74 | 0.743952 | from uuid import uuid4
from datetime import datetime
from django.db import models
class Bookmark(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
url = models.URLField('URL', unique=True)
name = models.CharField(max_length=200)
notes = models.TextField(blank=True)
created_at = models.DateTimeField(default=datetime.now, blank=True)
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
| true | true |
1c4a0541e9c6f3dabd3305439a3287d532a147dd | 1,361 | py | Python | app/core/tests/test_admin.py | kim-sun/recipe-app-api | c0c598f2188c42c820178ea7910c34ccdf641393 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | kim-sun/recipe-app-api | c0c598f2188c42c820178ea7910c34ccdf641393 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | kim-sun/recipe-app-api | c0c598f2188c42c820178ea7910c34ccdf641393 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/{id}
res = self.client.get(url) # response
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.651163 | 68 | 0.635562 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| true | true |
1c4a0676208607006d811b2a23a60b460dd13518 | 2,403 | py | Python | tests/api_tests/abstrac_api_test.py | kirillskor/dedoc | 7793a1be2220a26e7520521306351dfc0a9c8d98 | [
"Apache-2.0"
] | null | null | null | tests/api_tests/abstrac_api_test.py | kirillskor/dedoc | 7793a1be2220a26e7520521306351dfc0a9c8d98 | [
"Apache-2.0"
] | null | null | null | tests/api_tests/abstrac_api_test.py | kirillskor/dedoc | 7793a1be2220a26e7520521306351dfc0a9c8d98 | [
"Apache-2.0"
] | null | null | null | import json
import os
import requests
import unittest
class AbstractTestApiDocReader(unittest.TestCase):
data_directory_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))
def _check_metainfo(self, metainfo, actual_type: str, actual_name: str):
self.assertEqual(metainfo['file_type'], actual_type)
self.assertEqual(metainfo['file_name'], actual_name)
def _get_host(self):
host = os.environ.get('DOC_READER_HOST', 'localhost')
return host
def _get_port(self):
port = int(os.environ.get('DOCREADER_PORT', '1231'))
return port
def _get_abs_path(self, file_name: str) -> str:
return os.path.join(self.data_directory_path, file_name)
def _send_request(self, file_name: str, data: dict = None, expected_code: int = 200):
"""
send file `file_name` in post request with `data` as parameters. Expects that response return code
`expected_code`
:param file_name: name of file (should lie dedoc/tests/data folder
:param data: parameter dictionary (here you can put language for example)
:param expected_code: expected http response code. 200 for normal request
:return: result from json
"""
if data is None:
data = {}
host = self._get_host()
port = self._get_port()
abs_path = self._get_abs_path(file_name)
with open(abs_path, 'rb') as file:
files = {'file': (file_name, file)}
r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), files=files, data=data)
self.assertEqual(expected_code, r.status_code)
if expected_code != 200:
return None
if "return_html" in data and data["return_html"]:
return r.content.decode()
else:
return json.loads(r.content.decode())
def _send_request_wo_file(self, data: dict = None, expected_code: int = 200):
host = self._get_host()
port = self._get_port()
if data is None:
data = {}
r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), data=data)
self.assertEqual(expected_code, r.status_code)
if expected_code != 200:
return None
result = json.loads(r.content.decode())
return result
| 34.826087 | 113 | 0.62422 | import json
import os
import requests
import unittest
class AbstractTestApiDocReader(unittest.TestCase):
data_directory_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))
def _check_metainfo(self, metainfo, actual_type: str, actual_name: str):
self.assertEqual(metainfo['file_type'], actual_type)
self.assertEqual(metainfo['file_name'], actual_name)
def _get_host(self):
host = os.environ.get('DOC_READER_HOST', 'localhost')
return host
def _get_port(self):
port = int(os.environ.get('DOCREADER_PORT', '1231'))
return port
def _get_abs_path(self, file_name: str) -> str:
return os.path.join(self.data_directory_path, file_name)
def _send_request(self, file_name: str, data: dict = None, expected_code: int = 200):
if data is None:
data = {}
host = self._get_host()
port = self._get_port()
abs_path = self._get_abs_path(file_name)
with open(abs_path, 'rb') as file:
files = {'file': (file_name, file)}
r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), files=files, data=data)
self.assertEqual(expected_code, r.status_code)
if expected_code != 200:
return None
if "return_html" in data and data["return_html"]:
return r.content.decode()
else:
return json.loads(r.content.decode())
def _send_request_wo_file(self, data: dict = None, expected_code: int = 200):
host = self._get_host()
port = self._get_port()
if data is None:
data = {}
r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), data=data)
self.assertEqual(expected_code, r.status_code)
if expected_code != 200:
return None
result = json.loads(r.content.decode())
return result
| true | true |
1c4a074100935fa59bbb9f0995aad8db11245ba3 | 6,180 | py | Python | mne/io/array/tests/test_array.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T05:07:08.000Z | 2019-12-11T05:07:08.000Z | mne/io/array/tests/test_array.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 23 | 2017-09-12T11:08:26.000Z | 2019-10-04T11:11:29.000Z | mne/io/array/tests/test_array.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z | # Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne import find_events, Epochs, pick_types
from mne.io import read_raw_fif
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info, _kind_dict
from mne.utils import requires_version, run_tests_if_main
from mne.channels import make_dig_montage
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
def test_long_names():
"""Test long name support."""
info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error')
data = np.empty((2, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1']
info = create_info(['a' * 16] * 11, 1000., verbose='error')
data = np.empty((11, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)]
def test_array_copy():
"""Test copying during construction."""
info = create_info(1, 1000.)
data = np.empty((1, 1000))
# 'auto' (default)
raw = RawArray(data, info)
assert raw._data is data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info)
assert raw._data is not data
assert raw.info is not info
# 'info' (more restrictive)
raw = RawArray(data, info, copy='info')
assert raw._data is data
assert raw.info is not info
with pytest.raises(ValueError, match="data copying was not .* copy='info"):
RawArray(data.astype(np.float32), info, copy='info')
# 'data'
raw = RawArray(data, info, copy='data')
assert raw._data is not data
assert raw.info is info
# 'both'
raw = RawArray(data, info, copy='both')
assert raw._data is not data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info, copy='both')
assert raw._data is not data
assert raw.info is not info
# None
raw = RawArray(data, info, copy=None)
assert raw._data is data
assert raw.info is info
with pytest.raises(ValueError, match='data copying was not .* copy=None'):
RawArray(data.astype(np.float32), info, copy=None)
@pytest.mark.slowtest
@requires_version('scipy', '0.12')
def test_array_raw():
"""Test creating raw from array."""
# creating
raw = read_raw_fif(fif_fname).crop(2, 5)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
types = list()
for ci in range(101):
types.extend(('grad', 'grad', 'mag'))
types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
picks = np.concatenate([pick_types(raw.info)[::20],
pick_types(raw.info, meg=False, stim=True),
pick_types(raw.info, meg=False, eeg=True)[::20]])
del raw
data = data[picks]
ch_names = np.array(ch_names)[picks].tolist()
types = np.array(types)[picks].tolist()
types.pop(-1)
# wrong length
pytest.raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
pytest.raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert ('RawArray' in repr(raw2))
pytest.raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
kwargs = dict(fir_design='firwin', picks=picks)
raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs)
raw_hp = raw2.copy()
raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs)
raw_bp = raw2.copy()
raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
raw_bs = raw2.copy()
raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 15
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False)
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert len(events) > 2
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
evoked = epochs.average()
assert_equal(evoked.nave, len(events) - 1)
# complex data
rng = np.random.RandomState(0)
data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
raw = RawArray(data, create_info(1, 1000., 'eeg'))
assert_allclose(raw._data, data)
# Using digital montage to give MNI electrode coordinates
n_elec = 10
ts_size = 10000
Fs = 512.
ch_names = [str(i) for i in range(n_elec)]
ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist()
data = np.random.rand(n_elec, ts_size)
montage = make_dig_montage(
ch_pos=dict(zip(ch_names, ch_pos_loc)),
coord_frame='head'
)
info = create_info(ch_names, Fs, 'ecog', montage=montage)
raw = RawArray(data, info)
raw.plot_psd(average=False) # looking for inexistent layout
raw.plot_psd_topo()
run_tests_if_main()
| 34.719101 | 79 | 0.637379 |
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne import find_events, Epochs, pick_types
from mne.io import read_raw_fif
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info, _kind_dict
from mne.utils import requires_version, run_tests_if_main
from mne.channels import make_dig_montage
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
def test_long_names():
info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error')
data = np.empty((2, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1']
info = create_info(['a' * 16] * 11, 1000., verbose='error')
data = np.empty((11, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)]
def test_array_copy():
info = create_info(1, 1000.)
data = np.empty((1, 1000))
raw = RawArray(data, info)
assert raw._data is data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info)
assert raw._data is not data
assert raw.info is not info
raw = RawArray(data, info, copy='info')
assert raw._data is data
assert raw.info is not info
with pytest.raises(ValueError, match="data copying was not .* copy='info"):
RawArray(data.astype(np.float32), info, copy='info')
# 'data'
raw = RawArray(data, info, copy='data')
assert raw._data is not data
assert raw.info is info
# 'both'
raw = RawArray(data, info, copy='both')
assert raw._data is not data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info, copy='both')
assert raw._data is not data
assert raw.info is not info
# None
raw = RawArray(data, info, copy=None)
assert raw._data is data
assert raw.info is info
with pytest.raises(ValueError, match='data copying was not .* copy=None'):
RawArray(data.astype(np.float32), info, copy=None)
@pytest.mark.slowtest
@requires_version('scipy', '0.12')
def test_array_raw():
# creating
raw = read_raw_fif(fif_fname).crop(2, 5)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
types = list()
for ci in range(101):
types.extend(('grad', 'grad', 'mag'))
types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
picks = np.concatenate([pick_types(raw.info)[::20],
pick_types(raw.info, meg=False, stim=True),
pick_types(raw.info, meg=False, eeg=True)[::20]])
del raw
data = data[picks]
ch_names = np.array(ch_names)[picks].tolist()
types = np.array(types)[picks].tolist()
types.pop(-1)
# wrong length
pytest.raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
pytest.raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert ('RawArray' in repr(raw2))
pytest.raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
kwargs = dict(fir_design='firwin', picks=picks)
raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs)
raw_hp = raw2.copy()
raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs)
raw_bp = raw2.copy()
raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
raw_bs = raw2.copy()
raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 15
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False)
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert len(events) > 2
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
evoked = epochs.average()
assert_equal(evoked.nave, len(events) - 1)
# complex data
rng = np.random.RandomState(0)
data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
raw = RawArray(data, create_info(1, 1000., 'eeg'))
assert_allclose(raw._data, data)
# Using digital montage to give MNI electrode coordinates
n_elec = 10
ts_size = 10000
Fs = 512.
ch_names = [str(i) for i in range(n_elec)]
ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist()
data = np.random.rand(n_elec, ts_size)
montage = make_dig_montage(
ch_pos=dict(zip(ch_names, ch_pos_loc)),
coord_frame='head'
)
info = create_info(ch_names, Fs, 'ecog', montage=montage)
raw = RawArray(data, info)
raw.plot_psd(average=False) # looking for inexistent layout
raw.plot_psd_topo()
run_tests_if_main()
| true | true |
1c4a0802008b790d1c97611c1cf3739497f6082d | 1,992 | py | Python | scraping/norcleanser1.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | null | null | null | scraping/norcleanser1.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | 2 | 2021-02-03T01:55:13.000Z | 2021-04-30T12:46:33.000Z | scraping/norcleanser1.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | null | null | null | import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO norcleanser (image, name, price, rating) VALUES (%s, %s, %s, %s)"""
def crawl_url(pageUrl, cleansernor_arr):
url = 'https://www.skinstore.com/skin-care/skincare-concern/normal-combination.list?pageNumber=1&facetFilters=averageReviewScore_auto_content:%5B4+TO+5%5D|en_brand_content:Balance+Me|en_brand_content:Daily+Concepts|en_brand_content:DERMAdoctor|en_brand_content:Epionce|en_brand_content:First+Aid+Beauty|en_skincareproducttype_content:Cleanser|en_brand_content:FOREO'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
#######################################################for product 1############################################################################
cleanser = soup.find_all('li', class_='productListProducts_product')
try:
for cleansers in cleanser :
first_product_image = cleansers.find('img')['src']
img_name = random.randrange(1,500)
full_name = str(img_name) + ".jpg"
urllib.request.urlretrieve(first_product_image, full_name)
first_product_name = cleansers.find("h3",{"class":"productBlock_productName"}).get_text().strip()
first_product_price = cleansers.find("div",{"class":"productBlock_price"}).get_text().strip()
first_product_rating = cleansers.find("span",{"class":"visually-hidden productBlock_rating_hiddenLabel"}).get_text().strip()
cleansernor_arr.append((first_product_image, first_product_name, first_product_price, first_product_rating))
finally:
return cleansernor_arr
cleansernor_arr = crawl_url("", [])
print(len(cleansernor_arr))
cursor.executemany(sql, cleansernor_arr)
conn.commit()
cursor.close()
conn.close()
| 43.304348 | 370 | 0.681727 | import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO norcleanser (image, name, price, rating) VALUES (%s, %s, %s, %s)"""
def crawl_url(pageUrl, cleansernor_arr):
url = 'https://www.skinstore.com/skin-care/skincare-concern/normal-combination.list?pageNumber=1&facetFilters=averageReviewScore_auto_content:%5B4+TO+5%5D|en_brand_content:Balance+Me|en_brand_content:Daily+Concepts|en_brand_content:DERMAdoctor|en_brand_content:Epionce|en_brand_content:First+Aid+Beauty|en_skincareproducttype_content:Cleanser|en_brand_content:FOREO'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
cleanser = soup.find_all('li', class_='productListProducts_product')
try:
for cleansers in cleanser :
first_product_image = cleansers.find('img')['src']
img_name = random.randrange(1,500)
full_name = str(img_name) + ".jpg"
urllib.request.urlretrieve(first_product_image, full_name)
first_product_name = cleansers.find("h3",{"class":"productBlock_productName"}).get_text().strip()
first_product_price = cleansers.find("div",{"class":"productBlock_price"}).get_text().strip()
first_product_rating = cleansers.find("span",{"class":"visually-hidden productBlock_rating_hiddenLabel"}).get_text().strip()
cleansernor_arr.append((first_product_image, first_product_name, first_product_price, first_product_rating))
finally:
return cleansernor_arr
cleansernor_arr = crawl_url("", [])
print(len(cleansernor_arr))
cursor.executemany(sql, cleansernor_arr)
conn.commit()
cursor.close()
conn.close()
| true | true |
1c4a085e1b8dce8dacaca64ca275241b95642545 | 396 | py | Python | Easy/1475 Final Prices With a Special Discount in a Shop.py | raj713335/LeetCode | e60e145d90f45d37e148e8307a3d97f5f0741de0 | [
"Apache-2.0"
] | null | null | null | Easy/1475 Final Prices With a Special Discount in a Shop.py | raj713335/LeetCode | e60e145d90f45d37e148e8307a3d97f5f0741de0 | [
"Apache-2.0"
] | null | null | null | Easy/1475 Final Prices With a Special Discount in a Shop.py | raj713335/LeetCode | e60e145d90f45d37e148e8307a3d97f5f0741de0 | [
"Apache-2.0"
] | null | null | null | # https://leetcode.com/problems/final-prices-with-a-special-discount-in-a-shop/
class Solution:
def finalPrices(self, prices: List[int]) -> List[int]:
for i in range(0, len(prices)-1):
for j in range(i+1, len(prices)):
if prices[i] >= prices[j]:
prices[i] -= prices[j]
break
return prices
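# Worked example (illustrative, traced from the loop above):
# prices = [8, 4, 6, 2, 3] -> each price is reduced by the first later price
# that is less than or equal to it, giving [4, 2, 4, 2, 3].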
| 30.461538 | 79 | 0.515152 |
class Solution:
def finalPrices(self, prices: List[int]) -> List[int]:
for i in range(0, len(prices)-1):
for j in range(i+1, len(prices)):
if prices[i] >= prices[j]:
prices[i] -= prices[j]
break
return prices
| true | true |
1c4a08a1665f5f9ea06e9064cfd4079c18d6dbd6 | 4,417 | py | Python | src/tracker/tracker.py | prashkr/BitTorrent-Protocol | 66eec7be82ed97d3315f07c072971380ac1106fb | [
"MIT"
] | null | null | null | src/tracker/tracker.py | prashkr/BitTorrent-Protocol | 66eec7be82ed97d3315f07c072971380ac1106fb | [
"MIT"
] | null | null | null | src/tracker/tracker.py | prashkr/BitTorrent-Protocol | 66eec7be82ed97d3315f07c072971380ac1106fb | [
"MIT"
] | null | null | null | import Queue
import select
import socket
import sys
def process_msg(input_msg):
"""
:param input_msg:
:return:
"""
flag = True
peer_list_reply = "TRACKER_RESPONSE-"
input_msg = input_msg.strip('\n')
input_msg = input_msg.strip(' ')
inputs = input_msg.split('-')
if inputs[0] == 'REQUEST_PEERS':
msg_body = inputs[1].split(',')
# filename = msgBody[1].split(':')[1]
filename = "tracker-ips"
host, port = msg_body[0].split(':')
with open(filename, 'rw') as f:
for line in f:
line = line.strip('\n')
print line
if flag:
flag = False
peer_list_reply += line
else:
peer_list_reply = peer_list_reply + ',' + line
# createFile(filename,host+':'+port)
return peer_list_reply
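# Illustrative wire format handled by process_msg (addresses are made up):
#   request : "REQUEST_PEERS-127.0.0.1:6881,filename:demo.torrent"
#   reply   : "TRACKER_RESPONSE-10.0.0.1:6881,10.0.0.2:6881"
# The peer list in the reply is read line by line from the local "tracker-ips" file.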
def create_file(file_name, peer):
"""
:param file_name:
:param peer:
"""
f = open(file_name, 'a')
f.write(peer)
f.close()
if __name__ == '__main__':
host = sys.argv[1]
port = int(sys.argv[2])
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
# Bind the socket to the port
server_address = (host, port)
print >> sys.stderr, 'starting up on %s port %s' % server_address
server.bind(server_address)
# Listen for incoming connections
server.listen(5)
# Sockets from which we expect to read
inputs = [server]
# Sockets to which we expect to write
outputs = []
# Outgoing message queues (socket:Queue)
message_queues = {}
while inputs:
# Wait for at least one of the sockets to be ready for processing
print >> sys.stderr, '\nwaiting for the next event'
readable, writable, exceptional = select.select(inputs, outputs, inputs)
# Handle inputs
for s in readable:
if s is server:
# A "readable" server socket is ready to accept a connection
connection, client_address = s.accept()
print >> sys.stderr, 'new connection from', client_address
connection.setblocking(0)
inputs.append(connection)
# Give the connection a queue for data we want to send
message_queues[connection] = Queue.Queue()
else:
# receive data from connection
data = s.recv(1024)
if data:
# A readable client socket has data
print >> sys.stderr, 'received "%s" from %s' % (data, s.getpeername())
# Add output channel for response
if s not in outputs:
outputs.append(s)
# add reply to queue
reply_message = process_msg(data)
message_queues[s].put(reply_message)
else:
# Interpret empty result as closed connection
print >> sys.stderr, 'closing', client_address, 'after reading no data'
# Stop listening for input on the connection
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
# Remove message queue
del message_queues[s]
# Handle outputs
for s in writable:
try:
# get message from queue
next_msg = message_queues[s].get_nowait()
except Queue.Empty:
# No messages waiting so stop checking for writes.
print >> sys.stderr, 'output queue for', s.getpeername(), 'is empty'
outputs.remove(s)
else:
# send message on connection
print >> sys.stderr, 'sending "%s" to %s' % (next_msg, s.getpeername())
s.send(next_msg)
# Handle "exceptional conditions"
for s in exceptional:
print >> sys.stderr, 'handling exceptional condition for', s.getpeername()
# Stop listening for input on the connection
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
# Remove message queue
del message_queues[s]
| 30.462069 | 91 | 0.530224 | import Queue
import select
import socket
import sys
def process_msg(input_msg):
"""
:param input_msg:
:return:
"""
flag = True
peer_list_reply = "TRACKER_RESPONSE-"
input_msg = input_msg.strip('\n')
input_msg = input_msg.strip(' ')
inputs = input_msg.split('-')
if inputs[0] == 'REQUEST_PEERS':
msg_body = inputs[1].split(',')
filename = "tracker-ips"
host, port = msg_body[0].split(':')
with open(filename, 'rw') as f:
for line in f:
line = line.strip('\n')
print line
if flag:
flag = False
peer_list_reply += line
else:
peer_list_reply = peer_list_reply + ',' + line
return peer_list_reply
def create_file(file_name, peer):
"""
:param file_name:
:param peer:
"""
f = open(file_name, 'a')
f.write(peer)
f.close()
if __name__ == '__main__':
host = sys.argv[1]
port = int(sys.argv[2])
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
server_address = (host, port)
print >> sys.stderr, 'starting up on %s port %s' % server_address
server.bind(server_address)
server.listen(5)
inputs = [server]
outputs = []
message_queues = {}
while inputs:
print >> sys.stderr, '\nwaiting for the next event'
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s is server:
connection, client_address = s.accept()
print >> sys.stderr, 'new connection from', client_address
connection.setblocking(0)
inputs.append(connection)
message_queues[connection] = Queue.Queue()
else:
data = s.recv(1024)
if data:
print >> sys.stderr, 'received "%s" from %s' % (data, s.getpeername())
if s not in outputs:
outputs.append(s)
reply_message = process_msg(data)
message_queues[s].put(reply_message)
else:
print >> sys.stderr, 'closing', client_address, 'after reading no data'
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
del message_queues[s]
for s in writable:
try:
next_msg = message_queues[s].get_nowait()
except Queue.Empty:
print >> sys.stderr, 'output queue for', s.getpeername(), 'is empty'
outputs.remove(s)
else:
print >> sys.stderr, 'sending "%s" to %s' % (next_msg, s.getpeername())
s.send(next_msg)
for s in exceptional:
print >> sys.stderr, 'handling exceptional condition for', s.getpeername()
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
del message_queues[s]
| false | true |
1c4a08af26630f0f1eb8dd09eb2a7c42527d7a98 | 994 | py | Python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | [
"MIT"
] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | [
"MIT"
] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | [
"MIT"
] | 1 | 2020-12-12T21:04:41.000Z | 2020-12-12T21:04:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplyArtifactsRequest(Model):
"""Request body for applying artifacts to a virtual machine.
:param artifacts: The list of artifacts to apply.
:type artifacts: list of :class:`ArtifactInstallProperties
<azure.mgmt.devtestlabs.models.ArtifactInstallProperties>`
"""
_attribute_map = {
'artifacts': {'key': 'artifacts', 'type': '[ArtifactInstallProperties]'},
}
def __init__(self, artifacts=None):
self.artifacts = artifacts
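# Illustrative construction (sketch only; ArtifactInstallProperties is the model
# referenced in the docstring above):
#     request = ApplyArtifactsRequest(artifacts=[ArtifactInstallProperties(...)])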
| 34.275862 | 81 | 0.615694 |
from msrest.serialization import Model
class ApplyArtifactsRequest(Model):
_attribute_map = {
'artifacts': {'key': 'artifacts', 'type': '[ArtifactInstallProperties]'},
}
def __init__(self, artifacts=None):
self.artifacts = artifacts
| true | true |
1c4a08b855151a6840c0b86aa222ceed3a904014 | 4,712 | py | Python | SVS/model/archive/preprocessing/ch_asr/local/data_prep.py | Kirinel/SVS_system | 261b80d69578bc3c407bc927750d64858c42a24c | [
"Apache-2.0"
] | null | null | null | SVS/model/archive/preprocessing/ch_asr/local/data_prep.py | Kirinel/SVS_system | 261b80d69578bc3c407bc927750d64858c42a24c | [
"Apache-2.0"
] | null | null | null | SVS/model/archive/preprocessing/ch_asr/local/data_prep.py | Kirinel/SVS_system | 261b80d69578bc3c407bc927750d64858c42a24c | [
"Apache-2.0"
] | null | null | null | """Copyright [2020] [Jiatong Shi & Shuai Guo].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import re
def add_zero(number, size):
"""add_zero."""
out = str(number)
for i in range(size - len(out)):
out = "0" + out
return out
single_pron = [
"a",
"ai",
"ao",
"an",
"ang",
"o",
"ou",
"ong",
"e",
"ei",
"er",
"en",
"eng",
]
double_starter = ["zh", "ch", "sh", "ii", "aa", "ee", "oo", "vv", "uu"]
starter = [
"b",
"p",
"m",
"f",
"d",
"t",
"n",
"l",
"g",
"k",
"h",
"j",
"q",
"x",
"r",
"z",
"c",
"s",
]
def text_refactor(text):
"""text_refactor."""
text = re.sub(" +", " ", text)
units = text.split(" ")
# add a e o u i
for i in range(len(units)):
if len(units[i]) < 1:
print("error")
print(units)
print(text)
if units[i] in single_pron:
begin = units[i][0]
units[i] = begin + begin + units[i]
elif units[i] == "jue":
units[i] = "jve"
elif units[i] == "que":
units[i] = "qve"
elif units[i] == "xue":
units[i] = "xve"
elif units[i] == "wen":
units[i] = "uuun"
elif units[i] == "wei":
units[i] = "uuui"
elif "w" == units[i][0]:
units[i] = "uuu" + units[i][1:]
elif len(units[i]) > 1 and (
"yu" == units[i][:2] or "yv" == units[i][:2]
):
units[i] = "vvv" + units[i][2:]
elif "y" == units[i][0]:
units[i] = "iii" + units[i][1:]
# further refine
if units[i] == "iiiou":
units[i] = "iiiu"
elif units[i] == "iiiin":
units[i] = "iiin"
elif units[i] == "iiiing":
units[i] = "iiing"
spe = []
for unit in units:
if unit[:2] in double_starter:
spe.extend([unit[:2], unit[2:]])
else:
spe.extend([unit[:1], unit[1:]])
return " ".join(spe)
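# Worked example (illustrative): text_refactor("zhong guo an") returns
# "zh ong g uo aa an" -- a zero-initial final such as "an" is first padded to
# "aaan" and then split into the pseudo-initial "aa" plus the final "an".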
parser = argparse.ArgumentParser()
parser.add_argument("datadir", type=str, help="data directory")
parser.add_argument("outdir", type=str, help="output directory")
args = parser.parse_args()
if not os.path.exists("data"):
os.mkdir("data")
if not os.path.exists("data/" + args.outdir):
os.mkdir("data/" + args.outdir)
basedir = os.path.join("data", args.outdir)
kaldi_text = open(os.path.join(basedir, "text"), "w")
kaldi_wav_scp = open(os.path.join(basedir, "wav.scp"), "w")
kaldi_utt2spk = open(os.path.join(basedir, "utt2spk"), "w")
kaldi_spk2utt = open(os.path.join(basedir, "spk2utt"), "w")
# Walk the raw data directory: each numbered sub-directory holds the wav files
# plus transcript txt files whose lines are matched to utterance ids below.
for root, dirs, files in os.walk(args.datadir):
wav_storing = {}
text_storing = {}
piece_info = add_zero(root.split("/")[-1], 4)
for f in files:
if f.startswith("yll"):
os.system(
"mv %s %s" % (os.path.join(root, f), os.path.join(root, f[4:]))
)
f = f[4:]
name, suffix = f.split(".")
if suffix == "wav":
wav_storing[piece_info + name] = os.path.join(root, f)
if suffix == "txt" and f != "text.txt":
count = 1
text = open(os.path.join(root, f), "r")
while True:
line = text.readline()
if not line:
break
line = line.strip()
if len(line) > 0:
text_storing[
piece_info + add_zero(count, 4)
] = text_refactor(line)
count += 1
for key in text_storing.keys():
if len(text_storing[key]) == 0 or text_storing[key][0] == "#":
continue
kaldi_text.write("%s %s\n" % (key, text_storing[key]))
kaldi_wav_scp.write(
(
"%s sox -t wavpcm %s -c 1 -r 16000 -t wavpcm - |\n"
% (key, wav_storing[key])
)
)
kaldi_utt2spk.write("%s %s\n" % (key, key))
kaldi_spk2utt.write("%s %s\n" % (key, key))
kaldi_text.close()
kaldi_wav_scp.close()
kaldi_utt2spk.close()
kaldi_spk2utt.close()
# os.system("export LC_ALL=C")
| 26.772727 | 79 | 0.503608 |
import argparse
import os
import re
def add_zero(number, size):
out = str(number)
for i in range(size - len(out)):
out = "0" + out
return out
single_pron = [
"a",
"ai",
"ao",
"an",
"ang",
"o",
"ou",
"ong",
"e",
"ei",
"er",
"en",
"eng",
]
double_starter = ["zh", "ch", "sh", "ii", "aa", "ee", "oo", "vv", "uu"]
starter = [
"b",
"p",
"m",
"f",
"d",
"t",
"n",
"l",
"g",
"k",
"h",
"j",
"q",
"x",
"r",
"z",
"c",
"s",
]
def text_refactor(text):
text = re.sub(" +", " ", text)
units = text.split(" ")
for i in range(len(units)):
if len(units[i]) < 1:
print("error")
print(units)
print(text)
if units[i] in single_pron:
begin = units[i][0]
units[i] = begin + begin + units[i]
elif units[i] == "jue":
units[i] = "jve"
elif units[i] == "que":
units[i] = "qve"
elif units[i] == "xue":
units[i] = "xve"
elif units[i] == "wen":
units[i] = "uuun"
elif units[i] == "wei":
units[i] = "uuui"
elif "w" == units[i][0]:
units[i] = "uuu" + units[i][1:]
elif len(units[i]) > 1 and (
"yu" == units[i][:2] or "yv" == units[i][:2]
):
units[i] = "vvv" + units[i][2:]
elif "y" == units[i][0]:
units[i] = "iii" + units[i][1:]
if units[i] == "iiiou":
units[i] = "iiiu"
elif units[i] == "iiiin":
units[i] = "iiin"
elif units[i] == "iiiing":
units[i] = "iiing"
spe = []
for unit in units:
if unit[:2] in double_starter:
spe.extend([unit[:2], unit[2:]])
else:
spe.extend([unit[:1], unit[1:]])
return " ".join(spe)
parser = argparse.ArgumentParser()
parser.add_argument("datadir", type=str, help="data directory")
parser.add_argument("outdir", type=str, help="output directory")
args = parser.parse_args()
if not os.path.exists("data"):
os.mkdir("data")
if not os.path.exists("data/" + args.outdir):
os.mkdir("data/" + args.outdir)
basedir = os.path.join("data", args.outdir)
kaldi_text = open(os.path.join(basedir, "text"), "w")
kaldi_wav_scp = open(os.path.join(basedir, "wav.scp"), "w")
kaldi_utt2spk = open(os.path.join(basedir, "utt2spk"), "w")
kaldi_spk2utt = open(os.path.join(basedir, "spk2utt"), "w")
for root, dirs, files in os.walk(args.datadir):
wav_storing = {}
text_storing = {}
piece_info = add_zero(root.split("/")[-1], 4)
for f in files:
if f.startswith("yll"):
os.system(
"mv %s %s" % (os.path.join(root, f), os.path.join(root, f[4:]))
)
f = f[4:]
name, suffix = f.split(".")
if suffix == "wav":
wav_storing[piece_info + name] = os.path.join(root, f)
if suffix == "txt" and f != "text.txt":
count = 1
text = open(os.path.join(root, f), "r")
while True:
line = text.readline()
if not line:
break
line = line.strip()
if len(line) > 0:
text_storing[
piece_info + add_zero(count, 4)
] = text_refactor(line)
count += 1
for key in text_storing.keys():
if len(text_storing[key]) == 0 or text_storing[key][0] == "#":
continue
kaldi_text.write("%s %s\n" % (key, text_storing[key]))
kaldi_wav_scp.write(
(
"%s sox -t wavpcm %s -c 1 -r 16000 -t wavpcm - |\n"
% (key, wav_storing[key])
)
)
kaldi_utt2spk.write("%s %s\n" % (key, key))
kaldi_spk2utt.write("%s %s\n" % (key, key))
kaldi_text.close()
kaldi_wav_scp.close()
kaldi_utt2spk.close()
kaldi_spk2utt.close()
| true | true |
1c4a0a16894c5126858c0aa112f30b01f145fbcf | 4,964 | py | Python | src/tstoolbox/functions/expanding_window.py | timcera/tstoolbox | a32fa399d96082f01b7eedfd6c8893bdb881845c | [
"BSD-3-Clause"
] | 5 | 2016-10-13T18:06:41.000Z | 2021-06-29T19:47:36.000Z | src/tstoolbox/functions/expanding_window.py | timcera/tstoolbox | a32fa399d96082f01b7eedfd6c8893bdb881845c | [
"BSD-3-Clause"
] | 21 | 2016-04-28T16:48:03.000Z | 2021-12-16T18:07:07.000Z | src/tstoolbox/functions/expanding_window.py | timcera/tstoolbox | a32fa399d96082f01b7eedfd6c8893bdb881845c | [
"BSD-3-Clause"
] | 3 | 2018-03-21T21:07:52.000Z | 2021-01-22T20:07:49.000Z | # -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
from typing import List, Optional
import mando
import pandas as pd
import typic
from mando.rst_text_formatter import RSTHelpFormatter
from .. import tsutils
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
@mando.command("expanding_window", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def expanding_window_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
skiprows=None,
index_type="datetime",
names=None,
clean=False,
statistic="",
min_periods=1,
center=False,
source_units=None,
target_units=None,
print_input=False,
tablefmt="csv",
):
"""Calculate an expanding window statistic.
Parameters
----------
statistic : str
[optional, default is '']
+-----------+----------------------+
| statistic | Meaning |
+===========+======================+
| corr | correlation |
+-----------+----------------------+
| count | count of real values |
+-----------+----------------------+
| cov | covariance |
+-----------+----------------------+
| kurt | kurtosis |
+-----------+----------------------+
| max | maximum |
+-----------+----------------------+
| mean | mean |
+-----------+----------------------+
| median | median |
+-----------+----------------------+
| min | minimum |
+-----------+----------------------+
| skew | skew |
+-----------+----------------------+
| std | standard deviation |
+-----------+----------------------+
| sum | sum |
+-----------+----------------------+
| var | variance |
+-----------+----------------------+
min_periods : int
[optional, default is 1]
Minimum number of observations in window required to have a value
center : boolean
[optional, default is False]
Set the labels at the center of the window.
{input_ts}
{columns}
{start_date}
{end_date}
{dropna}
{skiprows}
{index_type}
{names}
{clean}
{source_units}
{target_units}
{print_input}
{tablefmt}
"""
tsutils.printiso(
expanding_window(
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
dropna=dropna,
skiprows=skiprows,
index_type=index_type,
names=names,
clean=clean,
statistic=statistic,
min_periods=min_periods,
center=center,
source_units=source_units,
target_units=target_units,
print_input=print_input,
),
tablefmt=tablefmt,
)
@tsutils.transform_args(statistic=tsutils.make_list)
@typic.al
def expanding_window(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
skiprows=None,
index_type="datetime",
names=None,
clean=False,
statistic: Optional[
List[
Literal[
"corr",
"count",
"cov",
"kurt",
"max",
"mean",
"median",
"min",
"skew",
"std",
"sum",
"var",
]
]
] = None,
min_periods: tsutils.IntGreaterEqualToZero = 1,
center: bool = False,
source_units=None,
target_units=None,
print_input=False,
):
"""Calculate an expanding window statistic."""
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
ntsd = tsd.expanding(min_periods=min_periods, center=center)
if statistic:
nntsd = pd.DataFrame()
for stat in statistic:
            # Evaluate each statistic on the expanding-window object itself so a
            # later statistic is not computed on the result of an earlier one.
            tmptsd = eval("ntsd.{}()".format(stat))
            tmptsd.columns = [
                tsutils.renamer(i, "expanding.{}".format(stat)) for i in tmptsd.columns
            ]
            nntsd = nntsd.join(tmptsd, how="outer")
else:
nntsd = ntsd
return tsutils.return_input(print_input, tsd, nntsd)
expanding_window.__doc__ = expanding_window_cli.__doc__
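# Hedged usage sketch (added for illustration; not part of the upstream module).
# It calls the `expanding_window` function defined above; "example.csv" is an
# assumed tstoolbox-readable time-series file.
if __name__ == "__main__":
    demo = expanding_window(
        input_ts="example.csv",  # hypothetical CSV with a datetime index
        statistic=["mean", "max"],  # any statistics from the docstring table
        min_periods=3,
    )
    print(demo.head())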
| 24.453202 | 85 | 0.471394 |
from __future__ import absolute_import, division, print_function
from typing import List, Optional
import mando
import pandas as pd
import typic
from mando.rst_text_formatter import RSTHelpFormatter
from .. import tsutils
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
@mando.command("expanding_window", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def expanding_window_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
skiprows=None,
index_type="datetime",
names=None,
clean=False,
statistic="",
min_periods=1,
center=False,
source_units=None,
target_units=None,
print_input=False,
tablefmt="csv",
):
tsutils.printiso(
expanding_window(
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
dropna=dropna,
skiprows=skiprows,
index_type=index_type,
names=names,
clean=clean,
statistic=statistic,
min_periods=min_periods,
center=center,
source_units=source_units,
target_units=target_units,
print_input=print_input,
),
tablefmt=tablefmt,
)
@tsutils.transform_args(statistic=tsutils.make_list)
@typic.al
def expanding_window(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
skiprows=None,
index_type="datetime",
names=None,
clean=False,
statistic: Optional[
List[
Literal[
"corr",
"count",
"cov",
"kurt",
"max",
"mean",
"median",
"min",
"skew",
"std",
"sum",
"var",
]
]
] = None,
min_periods: tsutils.IntGreaterEqualToZero = 1,
center: bool = False,
source_units=None,
target_units=None,
print_input=False,
):
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
ntsd = tsd.expanding(min_periods=min_periods, center=center)
if statistic:
nntsd = pd.DataFrame()
for stat in statistic:
            tmptsd = eval("ntsd.{}()".format(stat))
            tmptsd.columns = [
                tsutils.renamer(i, "expanding.{}".format(stat)) for i in tmptsd.columns
            ]
            nntsd = nntsd.join(tmptsd, how="outer")
else:
nntsd = ntsd
return tsutils.return_input(print_input, tsd, nntsd)
expanding_window.__doc__ = expanding_window_cli.__doc__
| true | true |
1c4a0c8f83c03c4ee8d31d036c9293db10cf3d62 | 1,215 | bzl | Python | deps.bzl | abrisco/cargo-bazel | 1bb0b7f295e89441b5b7e90898c8b9abdab38402 | [
"MIT"
] | 4 | 2021-11-08T14:53:23.000Z | 2022-02-25T03:32:32.000Z | deps.bzl | abrisco/cargo-bazel | 1bb0b7f295e89441b5b7e90898c8b9abdab38402 | [
"MIT"
] | 23 | 2021-10-13T18:53:05.000Z | 2022-03-07T00:57:25.000Z | deps.bzl | abrisco/cargo-bazel | 1bb0b7f295e89441b5b7e90898c8b9abdab38402 | [
"MIT"
] | 1 | 2021-12-09T17:11:39.000Z | 2021-12-09T17:11:39.000Z | """Dependencies required by the `cargo-bazel` rules"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("//3rdparty:third_party_deps.bzl", "third_party_deps")
load("//private:vendor_utils.bzl", "crates_vendor_deps")
def cargo_bazel_deps():
maybe(
http_archive,
name = "rules_rust",
sha256 = "7826dbbbf617da8645d2cdd9a944e7948cc9cf87e7242c54cc0c53110495d1c7",
strip_prefix = "rules_rust-acca6f400003b9ae097b69ba8f44878aaf65beed",
urls = [
# `main` branch as of 2022-03-01
"https://github.com/bazelbuild/rules_rust/archive/acca6f400003b9ae097b69ba8f44878aaf65beed.tar.gz",
],
)
maybe(
http_archive,
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
],
sha256 = "af87959afe497dc8dfd4c6cb66e1279cb98ccc84284619ebfec27d9c09a903de",
)
third_party_deps()
crates_vendor_deps()
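# Typical WORKSPACE usage (sketch; the repository name "cargo_bazel" and the
# http_archive that fetches this repository are assumptions):
#
#     load("@cargo_bazel//:deps.bzl", "cargo_bazel_deps")
#     cargo_bazel_deps()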
| 36.818182 | 126 | 0.681481 |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("//3rdparty:third_party_deps.bzl", "third_party_deps")
load("//private:vendor_utils.bzl", "crates_vendor_deps")
def cargo_bazel_deps():
maybe(
http_archive,
name = "rules_rust",
sha256 = "7826dbbbf617da8645d2cdd9a944e7948cc9cf87e7242c54cc0c53110495d1c7",
strip_prefix = "rules_rust-acca6f400003b9ae097b69ba8f44878aaf65beed",
urls = [
"https://github.com/bazelbuild/rules_rust/archive/acca6f400003b9ae097b69ba8f44878aaf65beed.tar.gz",
],
)
maybe(
http_archive,
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz",
],
sha256 = "af87959afe497dc8dfd4c6cb66e1279cb98ccc84284619ebfec27d9c09a903de",
)
third_party_deps()
crates_vendor_deps()
| true | true |
1c4a0cbe390bf014cfe06f318b4c150143ca2c65 | 8,847 | py | Python | ezblog/blog/views.py | zeropol2/ezblog | a43d231d454b32be35f5811a6ca63d17d654f59d | [
"Apache-2.0"
] | 4 | 2016-08-04T04:30:53.000Z | 2016-08-31T08:51:30.000Z | ezblog/blog/views.py | zeropol2/ezblog | a43d231d454b32be35f5811a6ca63d17d654f59d | [
"Apache-2.0"
] | null | null | null | ezblog/blog/views.py | zeropol2/ezblog | a43d231d454b32be35f5811a6ca63d17d654f59d | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Category, Tag
# index
def index(request):
if request.method == 'GET':
per_page = 2
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.all(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public'), per_page)
return __render_index(request, pg, page)
else:
raise Http404
# posts
def process_post(request, pk):
if request.method == 'GET':
return __get_post(request, pk)
elif request.method == 'PUT':
return __update_post(request, pk)
elif request.method == 'DELETE':
return __delete_post(request, pk)
else:
raise Http404
def __get_post(request, pk):
post = get_object_or_404(Post, pk=pk)
ctx = {
'post': post,
'categories': __get_categories(request),
'archives': __get_archives(request)
}
return render(request, 'detail_post.html', ctx)
@login_required
def __update_post(request, pk):
title = request.PUT.get('title')
content = request.PUT.get('content')
category_pk = request.PUT.get('category')
status = request.PUT.get('status')
tags = request.PUT.get('tags')
if tags:
tags = request.PUT.get('tags').split(',')
post = get_object_or_404(Post, pk=pk)
post.title = title
post.content = content
if category_pk:
post.category = Category.objects.get(pk=category_pk)
post.status = status
post.save()
if tags:
for name in tags:
name = name.strip()
if name:
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
post.tags.add(tag)
post.save()
response = HttpResponse()
response.status_code = 200
return response
@login_required
def __delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
response = HttpResponse()
response.status_code = 200
return response
# create_post
def create_post_or_list_posts(request):
if request.method == 'POST':
return __create_post(request)
elif request.method == 'GET':
return index(request)
else:
raise Http404
@login_required
def __create_post(request):
title = request.POST.get('title')
content = request.POST.get('content')
category_pk = request.POST.get('category')
status = request.POST.get('status')
tags = request.POST.get('tags')
if tags:
tags = request.POST.get('tags').split(',')
new_post = Post()
new_post.title = title
new_post.content = content
if category_pk:
new_post.category = Category.objects.get(pk=category_pk)
new_post.status = status
new_post.user = request.user
new_post.save()
if tags:
for name in tags:
name = name.strip()
if name:
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
new_post.tags.add(tag)
new_post.save()
url = reverse('blog:post', kwargs={'pk': new_post.pk})
return redirect(url)
# create_post_form
@login_required
def create_post_form(request):
if request.method == 'GET':
return __create_post_form(request)
else:
raise Http404
@login_required
def __create_post_form(request):
post = Post()
status_choices = post.get_status_choices()
ctx = {
'categories': __get_categories(request),
'status_choices': status_choices,
'archives': __get_archives(request)
}
return render(request, 'create_post.html', ctx)
# update_post_form
@login_required
def update_post_form(request, pk):
if request.method == 'GET':
return __update_post_form(request, pk)
else:
raise Http404
@login_required
def __update_post_form(request, pk):
post = Post.objects.get(pk=pk)
status_choices = post.get_status_choices()
ctx = {
'post': post,
'categories': __get_categories(request),
'status_choices': status_choices,
'archives': __get_archives(request)
}
return render(request, 'update_post.html', ctx)
# list
def posts_by_tag(request, tag_pk):
if request.method == 'GET':
target_tag = Tag.objects.get(pk=tag_pk)
if not target_tag:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(tags__in=[target_tag]).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', tags__in=[target_tag]).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def posts_by_category(request, category_pk):
if request.method == 'GET':
target_category = Category.objects.get(pk=category_pk)
if not target_category:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(category=target_category).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', category=target_category).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def posts_by_keyword(request):
if request.method == 'GET':
keyword = request.GET.get('keyword')
if not keyword:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
where_func = Q()
for keyword_item in keyword.split(' '):
target_tags = Tag.objects.filter(name__contains=keyword_item)
target_categories = Category.objects.filter(name__contains=keyword_item)
where_func = Q(where_func |
Q(title__contains=keyword_item) |
Q(content__contains=keyword_item) |
Q(tags__in=target_tags) | Q(category__in=target_categories))
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(where_func).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(Q(status='public') & where_func).distinct(), per_page)
return __render_index(request, pg, page, keyword=keyword)
else:
raise Http404
def posts_by_year(request, year):
if request.method == 'GET':
if not year:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(created_at__year=year).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', created_at__year=year).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def __render_index(request, pg, page, **kwargs):
try:
contents = pg.page(page)
except PageNotAnInteger:
contents = pg.page(1)
except EmptyPage:
contents = []
ctx = {
'posts': contents,
'categories': __get_categories(request),
'archives': __get_archives(request),
'keyword': kwargs.get('keyword')
}
return render(request, 'index.html', ctx)
def __get_categories(request):
categories = Category.objects.all()
for category in categories:
if request.user.is_authenticated():
category.count = Post.objects.filter(category=category).count()
else:
category.count = Post.objects.filter(category=category, status='public').count()
return categories
def __get_archives(request):
if request.user.is_authenticated():
all_posts = Post.objects.all()
else:
all_posts = Post.objects.filter(status='public')
result = {}
for item in all_posts:
year = item.created_at.year
count = result.get(year, 0)
result[year] = count+1
return result
| 27.646875 | 111 | 0.61648 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Category, Tag
def index(request):
if request.method == 'GET':
per_page = 2
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.all(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public'), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def process_post(request, pk):
if request.method == 'GET':
return __get_post(request, pk)
elif request.method == 'PUT':
return __update_post(request, pk)
elif request.method == 'DELETE':
return __delete_post(request, pk)
else:
raise Http404
def __get_post(request, pk):
post = get_object_or_404(Post, pk=pk)
ctx = {
'post': post,
'categories': __get_categories(request),
'archives': __get_archives(request)
}
return render(request, 'detail_post.html', ctx)
@login_required
def __update_post(request, pk):
title = request.PUT.get('title')
content = request.PUT.get('content')
category_pk = request.PUT.get('category')
status = request.PUT.get('status')
tags = request.PUT.get('tags')
if tags:
tags = request.PUT.get('tags').split(',')
post = get_object_or_404(Post, pk=pk)
post.title = title
post.content = content
if category_pk:
post.category = Category.objects.get(pk=category_pk)
post.status = status
post.save()
if tags:
for name in tags:
name = name.strip()
if name:
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
post.tags.add(tag)
post.save()
response = HttpResponse()
response.status_code = 200
return response
@login_required
def __delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
response = HttpResponse()
response.status_code = 200
return response
def create_post_or_list_posts(request):
if request.method == 'POST':
return __create_post(request)
elif request.method == 'GET':
return index(request)
else:
raise Http404
@login_required
def __create_post(request):
title = request.POST.get('title')
content = request.POST.get('content')
category_pk = request.POST.get('category')
status = request.POST.get('status')
tags = request.POST.get('tags')
if tags:
tags = request.POST.get('tags').split(',')
new_post = Post()
new_post.title = title
new_post.content = content
if category_pk:
new_post.category = Category.objects.get(pk=category_pk)
new_post.status = status
new_post.user = request.user
new_post.save()
if tags:
for name in tags:
name = name.strip()
if name:
try:
tag = Tag.objects.get(name=name)
except Tag.DoesNotExist:
tag = Tag()
tag.name = name
tag.save()
new_post.tags.add(tag)
new_post.save()
url = reverse('blog:post', kwargs={'pk': new_post.pk})
return redirect(url)
@login_required
def create_post_form(request):
if request.method == 'GET':
return __create_post_form(request)
else:
raise Http404
@login_required
def __create_post_form(request):
post = Post()
status_choices = post.get_status_choices()
ctx = {
'categories': __get_categories(request),
'status_choices': status_choices,
'archives': __get_archives(request)
}
return render(request, 'create_post.html', ctx)
@login_required
def update_post_form(request, pk):
if request.method == 'GET':
return __update_post_form(request, pk)
else:
raise Http404
@login_required
def __update_post_form(request, pk):
post = Post.objects.get(pk=pk)
status_choices = post.get_status_choices()
ctx = {
'post': post,
'categories': __get_categories(request),
'status_choices': status_choices,
'archives': __get_archives(request)
}
return render(request, 'update_post.html', ctx)
def posts_by_tag(request, tag_pk):
if request.method == 'GET':
target_tag = Tag.objects.get(pk=tag_pk)
if not target_tag:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(tags__in=[target_tag]).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', tags__in=[target_tag]).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def posts_by_category(request, category_pk):
if request.method == 'GET':
target_category = Category.objects.get(pk=category_pk)
if not target_category:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(category=target_category).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', category=target_category).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def posts_by_keyword(request):
if request.method == 'GET':
keyword = request.GET.get('keyword')
if not keyword:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
where_func = Q()
for keyword_item in keyword.split(' '):
target_tags = Tag.objects.filter(name__contains=keyword_item)
target_categories = Category.objects.filter(name__contains=keyword_item)
where_func = Q(where_func |
Q(title__contains=keyword_item) |
Q(content__contains=keyword_item) |
Q(tags__in=target_tags) | Q(category__in=target_categories))
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(where_func).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(Q(status='public') & where_func).distinct(), per_page)
return __render_index(request, pg, page, keyword=keyword)
else:
raise Http404
def posts_by_year(request, year):
if request.method == 'GET':
if not year:
url = reverse('blog:index')
return redirect(url)
per_page = 15
page = request.GET.get('page', 1)
if request.user.is_authenticated():
pg = Paginator(Post.objects.filter(created_at__year=year).distinct(), per_page)
else:
pg = Paginator(Post.objects.filter(status='public', created_at__year=year).distinct(), per_page)
return __render_index(request, pg, page)
else:
raise Http404
def __render_index(request, pg, page, **kwargs):
try:
contents = pg.page(page)
except PageNotAnInteger:
contents = pg.page(1)
except EmptyPage:
contents = []
ctx = {
'posts': contents,
'categories': __get_categories(request),
'archives': __get_archives(request),
'keyword': kwargs.get('keyword')
}
return render(request, 'index.html', ctx)
def __get_categories(request):
categories = Category.objects.all()
for category in categories:
if request.user.is_authenticated():
category.count = Post.objects.filter(category=category).count()
else:
category.count = Post.objects.filter(category=category, status='public').count()
return categories
def __get_archives(request):
if request.user.is_authenticated():
all_posts = Post.objects.all()
else:
all_posts = Post.objects.filter(status='public')
result = {}
for item in all_posts:
year = item.created_at.year
count = result.get(year, 0)
result[year] = count+1
return result
| true | true |
1c4a0cde0a499635d87048fe9f94b9177b5680fc | 5,529 | py | Python | cms/utils/i18n.py | 360youlun/django-cms | bc1240fd46de4c04f3b5402be99a81728a4a324c | [
"BSD-3-Clause"
] | 1 | 2019-04-15T10:28:46.000Z | 2019-04-15T10:28:46.000Z | cms/utils/i18n.py | damianmoore/django-cms | 2d3e10a01e792ec7da5c1418811c1be5ac84e5e2 | [
"BSD-3-Clause"
] | 5 | 2021-03-19T15:39:27.000Z | 2021-09-08T02:47:21.000Z | cms/utils/i18n.py | Acidburn0zzz/django-cms | 5a105a1c75eeb4c8a4c1c34301d93855e6724407 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import LanguageError
from cms.utils.conf import get_cms_setting, get_site_id
@contextmanager
def force_language(new_lang):
old_lang = get_current_language()
if old_lang != new_lang:
translation.activate(new_lang)
yield
translation.activate(old_lang)
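# Illustrative usage (the URL name "my-view" is hypothetical):
#     with force_language("de"):
#         german_url = reverse("my-view")
# The previously active language is restored when the block exits.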
def get_languages(site_id=None):
site_id = get_site_id(site_id)
result = get_cms_setting('LANGUAGES').get(site_id)
if not result:
result = []
defaults = get_cms_setting('LANGUAGES').get('default', {})
for code, name in settings.LANGUAGES:
lang = {'code': code, 'name': _(name)}
lang.update(defaults)
result.append(lang)
get_cms_setting('LANGUAGES')[site_id] = result
return result
def get_language_code(language_code):
"""
Returns language code while making sure it's in LANGUAGES
"""
if not language_code:
return None
languages = get_language_list()
if language_code in languages: # direct hit
return language_code
for lang in languages:
if language_code.split('-')[0] == lang: # base language hit
return lang
if lang.split('-')[0] == language_code: # base language hit
return lang
return language_code
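# Example behaviour (illustrative): with "en" and "de" configured, both "en" and
# "en-us" resolve to "en"; a code with no configured match is returned unchanged.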
def get_current_language():
"""
Returns the currently active language
It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.
Overcomes this issue: https://code.djangoproject.com/ticket/9340
"""
language_code = translation.get_language()
return get_language_code(language_code)
def get_language_list(site_id=None):
"""
:return: returns a list of iso2codes for this site
"""
if not settings.USE_I18N:
return [settings.LANGUAGE_CODE]
languages = []
for language in get_languages(site_id):
languages.append(language['code'])
return languages
def get_language_tuple(site_id=None):
"""
    :return: returns a list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site
"""
languages = []
for language in get_languages(site_id):
languages.append((language['code'], language['name']))
return languages
def get_language_dict(site_id=None):
"""
    :return: returns a dict of cms languages
"""
languages = {}
for language in get_languages(site_id):
languages[language['code']] = language['name']
return languages
def get_public_languages(site_id=None):
"""
:return: list of iso2codes of public languages for this site
"""
languages = []
for language in get_language_objects(site_id):
if language.get("public", True):
languages.append(language['code'])
return languages
def get_language_object(language_code, site_id=None):
"""
:param language_code: RFC5646 language code
:return: the language object filled up by defaults
"""
for language in get_languages(site_id):
if language['code'] == get_language_code(language_code):
return language
raise LanguageError('Language not found: %s' % language_code)
def get_language_objects(site_id=None):
"""
returns list of all language objects filled up by default values
"""
return list(get_languages(site_id))
def get_default_language(language_code=None, site_id=None):
"""
Returns default language depending on settings.LANGUAGE_CODE merged with
best match from get_cms_setting('LANGUAGES')
Returns: language_code
"""
if not language_code:
language_code = get_language_code(settings.LANGUAGE_CODE)
languages = get_language_list(site_id)
# first try if there is an exact language
if language_code in languages:
return language_code
    # otherwise fall back to the base language code (the part before the dash)
language_code = language_code.split("-")[0]
if not language_code in languages:
return settings.LANGUAGE_CODE
return language_code
def get_fallback_languages(language, site_id=None):
"""
returns a list of fallback languages for the given language
"""
try:
language = get_language_object(language, site_id)
except LanguageError:
language = get_languages(site_id)[0]
return language.get('fallbacks', [])
def get_redirect_on_fallback(language, site_id=None):
"""
returns if you should redirect on language fallback
:param language:
:param site_id:
:return: Boolean
"""
language = get_language_object(language, site_id)
return language.get('redirect_on_fallback', True)
def hide_untranslated(language, site_id=None):
"""
Should untranslated pages in this language be hidden?
:param language:
:param site_id:
:return: A Boolean
"""
obj = get_language_object(language, site_id)
return obj.get('hide_untranslated', True)
def is_language_prefix_patterns_used():
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
| 28.647668 | 121 | 0.691988 | from contextlib import contextmanager
from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import LanguageError
from cms.utils.conf import get_cms_setting, get_site_id
@contextmanager
def force_language(new_lang):
old_lang = get_current_language()
if old_lang != new_lang:
translation.activate(new_lang)
yield
translation.activate(old_lang)
def get_languages(site_id=None):
site_id = get_site_id(site_id)
result = get_cms_setting('LANGUAGES').get(site_id)
if not result:
result = []
defaults = get_cms_setting('LANGUAGES').get('default', {})
for code, name in settings.LANGUAGES:
lang = {'code': code, 'name': _(name)}
lang.update(defaults)
result.append(lang)
get_cms_setting('LANGUAGES')[site_id] = result
return result
def get_language_code(language_code):
if not language_code:
return None
languages = get_language_list()
if language_code in languages: return language_code
for lang in languages:
if language_code.split('-')[0] == lang: return lang
if lang.split('-')[0] == language_code: return lang
return language_code
def get_current_language():
language_code = translation.get_language()
return get_language_code(language_code)
def get_language_list(site_id=None):
if not settings.USE_I18N:
return [settings.LANGUAGE_CODE]
languages = []
for language in get_languages(site_id):
languages.append(language['code'])
return languages
def get_language_tuple(site_id=None):
languages = []
for language in get_languages(site_id):
languages.append((language['code'], language['name']))
return languages
def get_language_dict(site_id=None):
languages = {}
for language in get_languages(site_id):
languages[language['code']] = language['name']
return languages
def get_public_languages(site_id=None):
languages = []
for language in get_language_objects(site_id):
if language.get("public", True):
languages.append(language['code'])
return languages
def get_language_object(language_code, site_id=None):
for language in get_languages(site_id):
if language['code'] == get_language_code(language_code):
return language
raise LanguageError('Language not found: %s' % language_code)
def get_language_objects(site_id=None):
return list(get_languages(site_id))
def get_default_language(language_code=None, site_id=None):
if not language_code:
language_code = get_language_code(settings.LANGUAGE_CODE)
languages = get_language_list(site_id)
if language_code in languages:
return language_code
language_code = language_code.split("-")[0]
if not language_code in languages:
return settings.LANGUAGE_CODE
return language_code
def get_fallback_languages(language, site_id=None):
try:
language = get_language_object(language, site_id)
except LanguageError:
language = get_languages(site_id)[0]
return language.get('fallbacks', [])
def get_redirect_on_fallback(language, site_id=None):
language = get_language_object(language, site_id)
return language.get('redirect_on_fallback', True)
def hide_untranslated(language, site_id=None):
obj = get_language_object(language, site_id)
return obj.get('hide_untranslated', True)
def is_language_prefix_patterns_used():
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
| true | true |
1c4a0d26b1b1f83eea8c7a0005822c6d1ced6f53 | 36,374 | py | Python | adafruit_minimqtt/adafruit_minimqtt.py | Eason010212/Adafruit_CircuitPython_MiniMQTT | eccc36f41c973c3155bd633716670e1925d51bae | [
"MIT",
"Unlicense"
] | null | null | null | adafruit_minimqtt/adafruit_minimqtt.py | Eason010212/Adafruit_CircuitPython_MiniMQTT | eccc36f41c973c3155bd633716670e1925d51bae | [
"MIT",
"Unlicense"
] | null | null | null | adafruit_minimqtt/adafruit_minimqtt.py | Eason010212/Adafruit_CircuitPython_MiniMQTT | eccc36f41c973c3155bd633716670e1925d51bae | [
"MIT",
"Unlicense"
] | null | null | null | # SPDX-FileCopyrightText: 2019-2021 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# Original Work Copyright (c) 2016 Paul Sokolovsky, uMQTT
# Modified Work Copyright (c) 2019 Bradley Beach, esp32spi_mqtt
# Modified Work Copyright (c) 2012-2019 Roger Light and others, Paho MQTT Python
"""
`adafruit_minimqtt`
================================================================================
A minimal MQTT Library for CircuitPython.
* Author(s): Brent Rubell
Implementation Notes
--------------------
Adapted from https://github.com/micropython/micropython-lib/tree/master/umqtt.simple/umqtt
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import errno
import struct
import time
from random import randint
from micropython import const
from .matcher import MQTTMatcher
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MiniMQTT.git"
# Client-specific variables
MQTT_MSG_MAX_SZ = const(268435455)
MQTT_MSG_SZ_LIM = const(10000000)
MQTT_TOPIC_LENGTH_LIMIT = const(65535)
MQTT_TCP_PORT = const(1883)
MQTT_TLS_PORT = const(8883)
# MQTT Commands
MQTT_PINGREQ = b"\xc0\0"
MQTT_PINGRESP = const(0xD0)
MQTT_SUB = b"\x82"
MQTT_UNSUB = b"\xA2"
MQTT_DISCONNECT = b"\xe0\0"
# Variable CONNECT header [MQTT 3.1.2]
MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0")
CONNACK_ERRORS = {
const(0x01): "Connection Refused - Incorrect Protocol Version",
const(0x02): "Connection Refused - ID Rejected",
const(0x03): "Connection Refused - Server unavailable",
const(0x04): "Connection Refused - Incorrect username/password",
const(0x05): "Connection Refused - Unauthorized",
}
_default_sock = None # pylint: disable=invalid-name
_fake_context = None # pylint: disable=invalid-name
class MMQTTException(Exception):
"""MiniMQTT Exception class."""
# pylint: disable=unnecessary-pass
# pass
# Legacy ESP32SPI Socket API
def set_socket(sock, iface=None):
"""Legacy API for setting the socket and network interface.
:param sock: socket object.
:param iface: internet interface object
"""
global _default_sock # pylint: disable=invalid-name, global-statement
global _fake_context # pylint: disable=invalid-name, global-statement
_default_sock = sock
if iface:
_default_sock.set_interface(iface)
_fake_context = _FakeSSLContext(iface)
class _FakeSSLSocket:
def __init__(self, socket, tls_mode):
self._socket = socket
self._mode = tls_mode
self.settimeout = socket.settimeout
self.send = socket.send
self.recv = socket.recv
self.close = socket.close
def connect(self, address):
"""connect wrapper to add non-standard mode parameter"""
try:
return self._socket.connect(address, self._mode)
except RuntimeError as error:
raise OSError(errno.ENOMEM) from error
class _FakeSSLContext:
def __init__(self, iface):
self._iface = iface
def wrap_socket(self, socket, server_hostname=None):
"""Return the same socket"""
# pylint: disable=unused-argument
return _FakeSSLSocket(socket, self._iface.TLS_MODE)
class MQTT:
"""MQTT Client for CircuitPython.
:param str broker: MQTT Broker URL or IP Address.
:param int port: Optional port definition, defaults to 8883.
:param str username: Username for broker authentication.
:param str password: Password for broker authentication.
:param str client_id: Optional client identifier, defaults to a unique, generated string.
:param bool is_ssl: Sets a secure or insecure connection with the broker.
:param int keep_alive: KeepAlive interval between the broker and the MiniMQTT client.
:param socket socket_pool: A pool of socket resources available for the given radio.
:param ssl_context: SSL context for long-lived SSL connections.
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes, not-callable, invalid-name, no-member
def __init__(
self,
broker,
port=None,
username=None,
password=None,
client_id=None,
is_ssl=True,
keep_alive=60,
socket_pool=None,
ssl_context=None,
):
self._socket_pool = socket_pool
self._ssl_context = ssl_context
self._sock = None
self._backwards_compatible_sock = False
self.keep_alive = keep_alive
self._user_data = None
self._is_connected = False
self._msg_size_lim = MQTT_MSG_SZ_LIM
self._pid = 0
self._timestamp = 0
self.logger = None
self.broker = broker
self._username = username
self._password = password
if (
self._password and len(password.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT
): # [MQTT-3.1.3.5]
raise MMQTTException("Password length is too large.")
self.port = MQTT_TCP_PORT
if is_ssl:
self.port = MQTT_TLS_PORT
if port:
self.port = port
        # define client identifier
if client_id:
# user-defined client_id MAY allow client_id's > 23 bytes or
# non-alpha-numeric characters
self.client_id = client_id
else:
# assign a unique client_id
self.client_id = "cpy{0}{1}".format(
randint(0, int(time.monotonic() * 100) % 1000), randint(0, 99)
)
# generated client_id's enforce spec.'s length rules
if len(self.client_id) > 23 or not self.client_id:
raise ValueError("MQTT Client ID must be between 1 and 23 bytes")
# LWT
self._lw_topic = None
self._lw_qos = 0
self._lw_topic = None
self._lw_msg = None
self._lw_retain = False
# List of subscribed topics, used for tracking
self._subscribed_topics = []
self._on_message_filtered = MQTTMatcher()
# Default topic callback methods
self._on_message = None
self.on_connect = None
self.on_disconnect = None
self.on_publish = None
self.on_subscribe = None
self.on_unsubscribe = None
# pylint: disable=too-many-branches
def _get_connect_socket(self, host, port, *, timeout=1):
"""Obtains a new socket and connects to a broker.
:param str host: Desired broker hostname
:param int port: Desired broker port
:param int timeout: Desired socket timeout
"""
# For reconnections - check if we're using a socket already and close it
if self._sock:
self._sock.close()
self._sock = None
# Legacy API - use the interface's socket instead of a passed socket pool
if self._socket_pool is None:
self._socket_pool = _default_sock
# Legacy API - fake the ssl context
if self._ssl_context is None:
self._ssl_context = _fake_context
if not isinstance(port, int):
raise RuntimeError("Port must be an integer")
if port == 8883 and not self._ssl_context:
raise RuntimeError(
"ssl_context must be set before using adafruit_mqtt for secure MQTT."
)
if self.logger and port == MQTT_TLS_PORT:
self.logger.info(
"Establishing a SECURE SSL connection to {0}:{1}".format(host, port)
)
elif self.logger:
self.logger.info(
"Establishing an INSECURE connection to {0}:{1}".format(host, port)
)
addr_info = self._socket_pool.getaddrinfo(
host, port, 0, self._socket_pool.SOCK_STREAM
)[0]
sock = None
retry_count = 0
while retry_count < 5 and sock is None:
retry_count += 1
try:
sock = self._socket_pool.socket(
addr_info[0], addr_info[1], addr_info[2]
)
except OSError:
continue
connect_host = addr_info[-1][0]
if port == 8883:
sock = self._ssl_context.wrap_socket(sock, server_hostname=host)
connect_host = host
sock.settimeout(timeout)
try:
sock.connect((connect_host, port))
except MemoryError:
sock.close()
sock = None
except OSError:
sock.close()
sock = None
if sock is None:
raise RuntimeError("Repeated socket failures")
self._backwards_compatible_sock = not hasattr(sock, "recv_into")
return sock
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deinit()
def _sock_exact_recv(self, bufsize):
"""Reads _exact_ number of bytes from the connected socket. Will only return
string with the exact number of bytes requested.
The semantics of native socket receive is that it returns no more than the
specified number of bytes (i.e. max size). However, it makes no guarantees in
terms of the minimum size of the buffer, which could be 1 byte. This is a
wrapper for socket recv() to ensure that no less than the expected number of
bytes is returned or trigger a timeout exception.
:param int bufsize: number of bytes to receive
"""
stamp = time.monotonic()
rc = self._sock.recv(bufsize)
to_read = bufsize - len(rc)
assert to_read >= 0
read_timeout = self.keep_alive
while to_read > 0:
recv = self._sock.recv(to_read)
to_read -= len(recv)
rc += recv
if time.monotonic() - stamp > read_timeout:
raise MMQTTException(
"Unable to receive {} bytes within {} seconds.".format(
to_read, read_timeout
)
)
return rc
def deinit(self):
"""De-initializes the MQTT client and disconnects from the mqtt broker."""
self.disconnect()
@property
def mqtt_msg(self):
"""Returns maximum MQTT payload and topic size."""
return self._msg_size_lim, MQTT_TOPIC_LENGTH_LIMIT
@mqtt_msg.setter
def mqtt_msg(self, msg_size):
"""Sets the maximum MQTT message payload size.
:param int msg_size: Maximum MQTT payload size.
"""
if msg_size < MQTT_MSG_MAX_SZ:
self._msg_size_lim = msg_size
def will_set(self, topic=None, payload=None, qos=0, retain=False):
"""Sets the last will and testament properties. MUST be called before `connect()`.
:param str topic: MQTT Broker topic.
:param int,float,str payload: Last will disconnection payload.
payloads of type int & float are converted to a string.
:param int qos: Quality of Service level, defaults to
zero. Conventional options are ``0`` (send at most once), ``1``
(send at least once), or ``2`` (send exactly once).
.. note:: Only options ``1`` or ``0`` are QoS levels supported by this library.
:param bool retain: Specifies if the payload is to be retained when
it is published.
"""
if self.logger:
self.logger.debug("Setting last will properties")
self._valid_qos(qos)
if self._is_connected:
raise MMQTTException("Last Will should only be called before connect().")
if payload is None:
payload = ""
if isinstance(payload, (int, float, str)):
payload = str(payload).encode()
else:
raise MMQTTException("Invalid message data type.")
self._lw_qos = qos
self._lw_topic = topic
self._lw_msg = payload
self._lw_retain = retain
def add_topic_callback(self, mqtt_topic, callback_method):
"""Registers a callback_method for a specific MQTT topic.
:param str mqtt_topic: MQTT topic identifier.
:param str callback_method: Name of callback method.
"""
if mqtt_topic is None or callback_method is None:
raise ValueError("MQTT topic and callback method must both be defined.")
self._on_message_filtered[mqtt_topic] = callback_method
def remove_topic_callback(self, mqtt_topic):
"""Removes a registered callback method.
:param str mqtt_topic: MQTT topic identifier string.
"""
if mqtt_topic is None:
raise ValueError("MQTT Topic must be defined.")
try:
del self._on_message_filtered[mqtt_topic]
except KeyError:
raise KeyError(
"MQTT topic callback not added with add_topic_callback."
) from None
@property
def on_message(self):
"""Called when a new message has been received on a subscribed topic.
Expected method signature is ``on_message(client, topic, message)``
"""
return self._on_message
@on_message.setter
def on_message(self, method):
self._on_message = method
def _handle_on_message(self, client, topic, message):
matched = False
if topic is not None:
for callback in self._on_message_filtered.iter_match(topic):
callback(client, topic, message) # on_msg with callback
matched = True
if not matched and self.on_message: # regular on_message
self.on_message(client, topic, message)
def username_pw_set(self, username, password=None):
"""Set client's username and an optional password.
:param str username: Username to use with your MQTT broker.
:param str password: Password to use with your MQTT broker.
"""
if self._is_connected:
raise MMQTTException("This method must be called before connect().")
self._username = username
if password is not None:
self._password = password
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
def connect(self, clean_session=True, host=None, port=None, keep_alive=None):
"""Initiates connection with the MQTT Broker.
        :param bool clean_session: Starts a clean session with the broker; set to False
            to request a persistent session.
:param str host: Hostname or IP address of the remote broker.
:param int port: Network port of the remote broker.
:param int keep_alive: Maximum period allowed for communication, in seconds.
"""
if host:
self.broker = host
if port:
self.port = port
if keep_alive:
self.keep_alive = keep_alive
if self.logger:
self.logger.debug("Attempting to establish MQTT connection...")
# Get a new socket
self._sock = self._get_connect_socket(self.broker, self.port)
# Fixed Header
fixed_header = bytearray([0x10])
# NOTE: Variable header is
# MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0")
# because final 4 bytes are 4, 2, 0, 0
var_header = MQTT_HDR_CONNECT
var_header[6] = clean_session << 1
# Set up variable header and remaining_length
remaining_length = 12 + len(self.client_id)
if self._username:
remaining_length += 2 + len(self._username) + 2 + len(self._password)
var_header[6] |= 0xC0
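        # Keep Alive is a 16-bit big-endian value in seconds, split across the last two
        # bytes of the variable header (MSB then LSB).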
if self.keep_alive:
assert self.keep_alive < MQTT_TOPIC_LENGTH_LIMIT
var_header[7] |= self.keep_alive >> 8
var_header[8] |= self.keep_alive & 0x00FF
if self._lw_topic:
remaining_length += 2 + len(self._lw_topic) + 2 + len(self._lw_msg)
var_header[6] |= 0x4 | (self._lw_qos & 0x1) << 3 | (self._lw_qos & 0x2) << 3
var_header[6] |= self._lw_retain << 5
# Remaining length calculation
large_rel_length = False
if remaining_length > 0x7F:
large_rel_length = True
# Calculate Remaining Length [2.2.3]
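            # Each byte encodes 7 bits of the length; a set high bit means another
            # length byte follows. e.g. remaining_length 321 encodes to 0xC1 0x02.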
while remaining_length > 0:
encoded_byte = remaining_length % 0x80
remaining_length = remaining_length // 0x80
# if there is more data to encode, set the top bit of the byte
if remaining_length > 0:
encoded_byte |= 0x80
fixed_header.append(encoded_byte)
if large_rel_length:
fixed_header.append(0x00)
else:
fixed_header.append(remaining_length)
fixed_header.append(0x00)
if self.logger:
self.logger.debug("Sending CONNECT to broker...")
self.logger.debug(
"Fixed Header: %s\nVariable Header: %s", fixed_header, var_header
)
self._sock.send(fixed_header)
self._sock.send(var_header)
# [MQTT-3.1.3-4]
self._send_str(self.client_id)
if self._lw_topic:
# [MQTT-3.1.3-11]
self._send_str(self._lw_topic)
self._send_str(self._lw_msg)
        if self._username is not None:
            self._send_str(self._username)
            self._send_str(self._password)
if self.logger:
self.logger.debug("Receiving CONNACK packet from broker")
while True:
op = self._wait_for_msg()
if op == 32:
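                # CONNACK (0x20): the three bytes read are the remaining length (0x02),
                # the connect acknowledge flags, and the return code.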
rc = self._sock_exact_recv(3)
assert rc[0] == 0x02
if rc[2] != 0x00:
raise MMQTTException(CONNACK_ERRORS[rc[2]])
self._is_connected = True
result = rc[0] & 1
if self.on_connect is not None:
self.on_connect(self, self._user_data, result, rc[2])
return result
def disconnect(self):
"""Disconnects the MiniMQTT client from the MQTT broker."""
self.is_connected()
if self.logger is not None:
self.logger.debug("Sending DISCONNECT packet to broker")
try:
self._sock.send(MQTT_DISCONNECT)
except RuntimeError as e:
if self.logger:
self.logger.warning("Unable to send DISCONNECT packet: {}".format(e))
if self.logger is not None:
self.logger.debug("Closing socket")
self._sock.close()
self._is_connected = False
self._subscribed_topics = []
if self.on_disconnect is not None:
self.on_disconnect(self, self._user_data, 0)
def ping(self):
"""Pings the MQTT Broker to confirm if the broker is alive or if
there is an active network connection.
Returns response codes of any messages received while waiting for PINGRESP.
"""
self.is_connected()
if self.logger:
self.logger.debug("Sending PINGREQ")
self._sock.send(MQTT_PINGREQ)
ping_timeout = self.keep_alive
stamp = time.monotonic()
rc, rcs = None, []
while rc != MQTT_PINGRESP:
rc = self._wait_for_msg()
if rc:
rcs.append(rc)
if time.monotonic() - stamp > ping_timeout:
raise MMQTTException("PINGRESP not returned from broker.")
return rcs
# pylint: disable=too-many-branches, too-many-statements
def publish(self, topic, msg, retain=False, qos=0):
"""Publishes a message to a topic provided.
:param str topic: Unique topic identifier.
:param str,int,float,bytes msg: Data to send to the broker.
:param bool retain: Whether the message is saved by the broker.
:param int qos: Quality of Service level for the message, defaults to zero.
"""
self.is_connected()
self._valid_topic(topic)
if "+" in topic or "#" in topic:
raise MMQTTException("Publish topic can not contain wildcards.")
# check msg/qos kwargs
if msg is None:
raise MMQTTException("Message can not be None.")
if isinstance(msg, (int, float)):
msg = str(msg).encode("ascii")
elif isinstance(msg, str):
msg = str(msg).encode("utf-8")
elif isinstance(msg, bytes):
pass
else:
raise MMQTTException("Invalid message data type.")
if len(msg) > MQTT_MSG_MAX_SZ:
raise MMQTTException("Message size larger than %d bytes." % MQTT_MSG_MAX_SZ)
assert (
0 <= qos <= 1
), "Quality of Service Level 2 is unsupported by this library."
# fixed header. [3.3.1.2], [3.3.1.3]
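        # PUBLISH fixed-header byte: high nibble 0x3 is the packet type; the low nibble
        # carries DUP (bit 3), QoS (bits 2-1), and RETAIN (bit 0).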
pub_hdr_fixed = bytearray([0x30 | retain | qos << 1])
# variable header = 2-byte Topic length (big endian)
pub_hdr_var = bytearray(struct.pack(">H", len(topic)))
pub_hdr_var.extend(topic.encode("utf-8")) # Topic name
remaining_length = 2 + len(msg) + len(topic)
if qos > 0:
# packet identifier where QoS level is 1 or 2. [3.3.2.2]
remaining_length += 2
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
pub_hdr_var.append(self._pid >> 8)
pub_hdr_var.append(self._pid & 0xFF)
# Calculate remaining length [2.2.3]
if remaining_length > 0x7F:
while remaining_length > 0:
encoded_byte = remaining_length % 0x80
remaining_length = remaining_length // 0x80
if remaining_length > 0:
encoded_byte |= 0x80
pub_hdr_fixed.append(encoded_byte)
else:
pub_hdr_fixed.append(remaining_length)
if self.logger:
self.logger.debug(
"Sending PUBLISH\nTopic: %s\nMsg: %s\
\nQoS: %d\nRetain? %r",
topic,
msg,
qos,
retain,
)
self._sock.send(pub_hdr_fixed)
self._sock.send(pub_hdr_var)
self._sock.send(msg)
if qos == 0 and self.on_publish is not None:
self.on_publish(self, self._user_data, topic, self._pid)
if qos == 1:
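            # Wait for a PUBACK (0x40) carrying the same packet identifier before returning.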
while True:
op = self._wait_for_msg()
if op == 0x40:
sz = self._sock_exact_recv(1)
assert sz == b"\x02"
rcv_pid = self._sock_exact_recv(2)
rcv_pid = rcv_pid[0] << 0x08 | rcv_pid[1]
if self._pid == rcv_pid:
if self.on_publish is not None:
self.on_publish(self, self._user_data, topic, rcv_pid)
return
def subscribe(self, topic, qos=0):
"""Subscribes to a topic on the MQTT Broker.
        This method can subscribe to one topic or multiple topics.
:param str,tuple,list topic: Unique MQTT topic identifier string. If
this is a `tuple`, then the tuple should
contain topic identifier string and qos
level integer. If this is a `list`, then
each list element should be a tuple containing
a topic identifier string and qos level integer.
:param int qos: Quality of Service level for the topic, defaults to
zero. Conventional options are ``0`` (send at most once), ``1``
(send at least once), or ``2`` (send exactly once).
"""
self.is_connected()
topics = None
if isinstance(topic, tuple):
topic, qos = topic
self._valid_topic(topic)
self._valid_qos(qos)
if isinstance(topic, str):
self._valid_topic(topic)
self._valid_qos(qos)
topics = [(topic, qos)]
if isinstance(topic, list):
topics = []
for t, q in topic:
self._valid_qos(q)
self._valid_topic(t)
topics.append((t, q))
# Assemble packet
packet_length = 2 + (2 * len(topics)) + (1 * len(topics))
packet_length += sum(len(topic) for topic, qos in topics)
packet_length_byte = packet_length.to_bytes(1, "big")
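        # NOTE: the remaining length is written as a single byte here, so this
        # implementation assumes the SUBSCRIBE payload stays under 128 bytes.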
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
packet_id_bytes = self._pid.to_bytes(2, "big")
# Packet with variable and fixed headers
packet = MQTT_SUB + packet_length_byte + packet_id_bytes
# attaching topic and QOS level to the packet
for t, q in topics:
topic_size = len(t).to_bytes(2, "big")
qos_byte = q.to_bytes(1, "big")
packet += topic_size + t.encode() + qos_byte
if self.logger:
for t, q in topics:
self.logger.debug("SUBSCRIBING to topic %s with QoS %d", t, q)
self._sock.send(packet)
while True:
op = self._wait_for_msg()
if op == 0x90:
rc = self._sock_exact_recv(4)
assert rc[1] == packet[2] and rc[2] == packet[3]
if rc[3] == 0x80:
raise MMQTTException("SUBACK Failure!")
for t, q in topics:
if self.on_subscribe is not None:
self.on_subscribe(self, self._user_data, t, q)
self._subscribed_topics.append(t)
return
def unsubscribe(self, topic):
"""Unsubscribes from a MQTT topic.
:param str,list topic: Unique MQTT topic identifier string or list.
"""
topics = None
if isinstance(topic, str):
self._valid_topic(topic)
topics = [(topic)]
if isinstance(topic, list):
topics = []
for t in topic:
self._valid_topic(t)
topics.append((t))
for t in topics:
if t not in self._subscribed_topics:
raise MMQTTException(
"Topic must be subscribed to before attempting unsubscribe."
)
# Assemble packet
packet_length = 2 + (2 * len(topics))
packet_length += sum(len(topic) for topic in topics)
packet_length_byte = packet_length.to_bytes(1, "big")
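        # Single-byte remaining length: assumes the UNSUBSCRIBE payload stays under 128 bytes.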
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
packet_id_bytes = self._pid.to_bytes(2, "big")
packet = MQTT_UNSUB + packet_length_byte + packet_id_bytes
for t in topics:
topic_size = len(t).to_bytes(2, "big")
packet += topic_size + t.encode()
if self.logger:
for t in topics:
self.logger.debug("UNSUBSCRIBING from topic %s", t)
self._sock.send(packet)
if self.logger:
self.logger.debug("Waiting for UNSUBACK...")
while True:
op = self._wait_for_msg()
if op == 176:
rc = self._sock_exact_recv(3)
assert rc[0] == 0x02
# [MQTT-3.32]
assert rc[1] == packet_id_bytes[0] and rc[2] == packet_id_bytes[1]
for t in topics:
if self.on_unsubscribe is not None:
self.on_unsubscribe(self, self._user_data, t, self._pid)
self._subscribed_topics.remove(t)
return
def reconnect(self, resub_topics=True):
"""Attempts to reconnect to the MQTT broker.
:param bool resub_topics: Resubscribe to previously subscribed topics.
"""
if self.logger:
self.logger.debug("Attempting to reconnect with MQTT broker")
self.connect()
if self.logger:
self.logger.debug("Reconnected with broker")
if resub_topics:
if self.logger:
self.logger.debug(
"Attempting to resubscribe to previously subscribed topics."
)
subscribed_topics = self._subscribed_topics.copy()
self._subscribed_topics = []
while subscribed_topics:
feed = subscribed_topics.pop()
self.subscribe(feed)
def loop(self, timeout=1):
"""Non-blocking message loop. Use this method to
check incoming subscription messages.
Returns response codes of any messages received.
:param int timeout: Socket timeout, in seconds.
"""
if self._timestamp == 0:
self._timestamp = time.monotonic()
current_time = time.monotonic()
if current_time - self._timestamp >= self.keep_alive:
# Handle KeepAlive by expecting a PINGREQ/PINGRESP from the server
if self.logger is not None:
self.logger.debug(
"KeepAlive period elapsed - requesting a PINGRESP from the server..."
)
rcs = self.ping()
self._timestamp = 0
return rcs
self._sock.settimeout(timeout)
rc = self._wait_for_msg()
return [rc] if rc else None
def _wait_for_msg(self, timeout=0.1):
"""Reads and processes network events."""
# CPython socket module contains a timeout attribute
if hasattr(self._socket_pool, "timeout"):
try:
res = self._sock_exact_recv(1)
except self._socket_pool.timeout as error:
return None
else: # socketpool, esp32spi
try:
res = self._sock_exact_recv(1)
except OSError as error:
if error.errno == errno.ETIMEDOUT:
# raised by a socket timeout if 0 bytes were present
return None
raise MMQTTException from error
# Block while we parse the rest of the response
self._sock.settimeout(timeout)
if res in [None, b""]:
# If we get here, it means that there is nothing to be received
return None
if res[0] == MQTT_PINGRESP:
if self.logger:
self.logger.debug("Got PINGRESP")
sz = self._sock_exact_recv(1)[0]
if sz != 0x00:
raise MMQTTException(
"Unexpected PINGRESP returned from broker: {}.".format(sz)
)
return MQTT_PINGRESP
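        # The high nibble of the first byte is the control packet type; only PUBLISH
        # (type 0x3) is parsed further, any other type byte is returned to the caller.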
if res[0] & 0xF0 != 0x30:
return res[0]
sz = self._recv_len()
# topic length MSB & LSB
topic_len = self._sock_exact_recv(2)
topic_len = (topic_len[0] << 8) | topic_len[1]
topic = self._sock_exact_recv(topic_len)
topic = str(topic, "utf-8")
sz -= topic_len + 2
pid = 0
if res[0] & 0x06:
pid = self._sock_exact_recv(2)
pid = pid[0] << 0x08 | pid[1]
sz -= 0x02
# read message contents
msg = self._sock_exact_recv(sz)
self._handle_on_message(self, topic, str(msg, "utf-8"))
if res[0] & 0x06 == 0x02:
pkt = bytearray(b"\x40\x02\0\0")
struct.pack_into("!H", pkt, 2, pid)
self._sock.send(pkt)
elif res[0] & 6 == 4:
assert 0
return res[0]
def _recv_len(self):
"""Unpack MQTT message length."""
n = 0
sh = 0
b = bytearray(1)
while True:
b = self._sock_exact_recv(1)[0]
n |= (b & 0x7F) << sh
if not b & 0x80:
return n
sh += 7
def _recv_into(self, buf, size=0):
"""Backwards-compatible _recv_into implementation."""
if self._backwards_compatible_sock:
size = len(buf) if size == 0 else size
b = self._sock.recv(size)
read_size = len(b)
buf[:read_size] = b
return read_size
return self._sock.recv_into(buf, size)
def _sock_exact_recv(self, bufsize):
"""Reads _exact_ number of bytes from the connected socket. Will only return
string with the exact number of bytes requested.
The semantics of native socket receive is that it returns no more than the
specified number of bytes (i.e. max size). However, it makes no guarantees in
terms of the minimum size of the buffer, which could be 1 byte. This is a
wrapper for socket recv() to ensure that no less than the expected number of
bytes is returned or trigger a timeout exception.
:param int bufsize: number of bytes to receive
"""
if not self._backwards_compatible_sock:
# CPython/Socketpool Impl.
rc = bytearray(bufsize)
self._sock.recv_into(rc, bufsize)
else: # ESP32SPI Impl.
stamp = time.monotonic()
read_timeout = self.keep_alive
# This will timeout with socket timeout (not keepalive timeout)
rc = self._sock.recv(bufsize)
if not rc:
if self.logger:
self.logger.debug("_sock_exact_recv timeout")
# If no bytes waiting, raise same exception as socketpool
raise OSError(errno.ETIMEDOUT)
# If any bytes waiting, try to read them all,
# or raise exception if wait longer than read_timeout
to_read = bufsize - len(rc)
assert to_read >= 0
read_timeout = self.keep_alive
while to_read > 0:
recv = self._sock.recv(to_read)
to_read -= len(recv)
rc += recv
if time.monotonic() - stamp > read_timeout:
raise MMQTTException(
"Unable to receive {} bytes within {} seconds.".format(
to_read, read_timeout
)
)
return rc
def _send_str(self, string):
"""Encodes a string and sends it to a socket.
:param str string: String to write to the socket.
"""
self._sock.send(struct.pack("!H", len(string)))
if isinstance(string, str):
self._sock.send(str.encode(string, "utf-8"))
else:
self._sock.send(string)
@staticmethod
def _valid_topic(topic):
"""Validates if topic provided is proper MQTT topic format.
:param str topic: Topic identifier
"""
if topic is None:
raise MMQTTException("Topic may not be NoneType")
# [MQTT-4.7.3-1]
if not topic:
raise MMQTTException("Topic may not be empty.")
# [MQTT-4.7.3-3]
if len(topic.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT:
raise MMQTTException("Topic length is too large.")
@staticmethod
def _valid_qos(qos_level):
"""Validates if the QoS level is supported by this library
:param int qos_level: Desired QoS level.
"""
if isinstance(qos_level, int):
if qos_level < 0 or qos_level > 2:
raise MMQTTException("QoS must be between 1 and 2.")
else:
raise MMQTTException("QoS must be an integer.")
def is_connected(self):
"""Returns MQTT client session status as True if connected, raises
a `MMQTTException` if `False`.
"""
if self._sock is None or self._is_connected is False:
raise MMQTTException("MiniMQTT is not connected.")
return self._is_connected
# Logging
def enable_logger(self, logger, log_level=20):
"""Enables library logging provided a logger object.
        :param logger: A Python logger package.
:param log_level: Numeric value of a logging level, defaults to INFO.
"""
self.logger = logger.getLogger("log")
self.logger.setLevel(log_level)
def disable_logger(self):
"""Disables logging."""
if not self.logger:
raise MMQTTException("Can not disable logger, no logger found.")
self.logger = None
| 37.38335 | 108 | 0.583274 |
import errno
import struct
import time
from random import randint
from micropython import const
from .matcher import MQTTMatcher
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MiniMQTT.git"
MQTT_MSG_MAX_SZ = const(268435455)
MQTT_MSG_SZ_LIM = const(10000000)
MQTT_TOPIC_LENGTH_LIMIT = const(65535)
MQTT_TCP_PORT = const(1883)
MQTT_TLS_PORT = const(8883)
MQTT_PINGREQ = b"\xc0\0"
MQTT_PINGRESP = const(0xD0)
MQTT_SUB = b"\x82"
MQTT_UNSUB = b"\xA2"
MQTT_DISCONNECT = b"\xe0\0"
MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0")
CONNACK_ERRORS = {
const(0x01): "Connection Refused - Incorrect Protocol Version",
const(0x02): "Connection Refused - ID Rejected",
const(0x03): "Connection Refused - Server unavailable",
const(0x04): "Connection Refused - Incorrect username/password",
const(0x05): "Connection Refused - Unauthorized",
}
_default_sock = None
_fake_context = None
class MMQTTException(Exception):
    pass
def set_socket(sock, iface=None):
    global _default_sock
    global _fake_context
    _default_sock = sock
if iface:
_default_sock.set_interface(iface)
_fake_context = _FakeSSLContext(iface)
class _FakeSSLSocket:
def __init__(self, socket, tls_mode):
self._socket = socket
self._mode = tls_mode
self.settimeout = socket.settimeout
self.send = socket.send
self.recv = socket.recv
self.close = socket.close
def connect(self, address):
try:
return self._socket.connect(address, self._mode)
except RuntimeError as error:
raise OSError(errno.ENOMEM) from error
class _FakeSSLContext:
def __init__(self, iface):
self._iface = iface
def wrap_socket(self, socket, server_hostname=None):
return _FakeSSLSocket(socket, self._iface.TLS_MODE)
class MQTT:
def __init__(
self,
broker,
port=None,
username=None,
password=None,
client_id=None,
is_ssl=True,
keep_alive=60,
socket_pool=None,
ssl_context=None,
):
self._socket_pool = socket_pool
self._ssl_context = ssl_context
self._sock = None
self._backwards_compatible_sock = False
self.keep_alive = keep_alive
self._user_data = None
self._is_connected = False
self._msg_size_lim = MQTT_MSG_SZ_LIM
self._pid = 0
self._timestamp = 0
self.logger = None
self.broker = broker
self._username = username
self._password = password
if (
self._password and len(password.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT
        ):
            raise MMQTTException("Password length is too large.")
self.port = MQTT_TCP_PORT
if is_ssl:
self.port = MQTT_TLS_PORT
if port:
self.port = port
if client_id:
# non-alpha-numeric characters
self.client_id = client_id
else:
# assign a unique client_id
self.client_id = "cpy{0}{1}".format(
randint(0, int(time.monotonic() * 100) % 1000), randint(0, 99)
)
# generated client_id's enforce spec.'s length rules
if len(self.client_id) > 23 or not self.client_id:
raise ValueError("MQTT Client ID must be between 1 and 23 bytes")
# LWT
        self._lw_topic = None
        self._lw_qos = 0
        self._lw_msg = None
        self._lw_retain = False
# List of subscribed topics, used for tracking
self._subscribed_topics = []
self._on_message_filtered = MQTTMatcher()
# Default topic callback methods
self._on_message = None
self.on_connect = None
self.on_disconnect = None
self.on_publish = None
self.on_subscribe = None
self.on_unsubscribe = None
# pylint: disable=too-many-branches
def _get_connect_socket(self, host, port, *, timeout=1):
# For reconnections - check if we're using a socket already and close it
if self._sock:
self._sock.close()
self._sock = None
if self._socket_pool is None:
self._socket_pool = _default_sock
# Legacy API - fake the ssl context
if self._ssl_context is None:
self._ssl_context = _fake_context
if not isinstance(port, int):
raise RuntimeError("Port must be an integer")
if port == 8883 and not self._ssl_context:
raise RuntimeError(
"ssl_context must be set before using adafruit_mqtt for secure MQTT."
)
if self.logger and port == MQTT_TLS_PORT:
self.logger.info(
"Establishing a SECURE SSL connection to {0}:{1}".format(host, port)
)
elif self.logger:
self.logger.info(
"Establishing an INSECURE connection to {0}:{1}".format(host, port)
)
addr_info = self._socket_pool.getaddrinfo(
host, port, 0, self._socket_pool.SOCK_STREAM
)[0]
sock = None
retry_count = 0
while retry_count < 5 and sock is None:
retry_count += 1
try:
sock = self._socket_pool.socket(
addr_info[0], addr_info[1], addr_info[2]
)
except OSError:
continue
connect_host = addr_info[-1][0]
if port == 8883:
sock = self._ssl_context.wrap_socket(sock, server_hostname=host)
connect_host = host
sock.settimeout(timeout)
try:
sock.connect((connect_host, port))
except MemoryError:
sock.close()
sock = None
except OSError:
sock.close()
sock = None
if sock is None:
raise RuntimeError("Repeated socket failures")
self._backwards_compatible_sock = not hasattr(sock, "recv_into")
return sock
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deinit()
def _sock_exact_recv(self, bufsize):
stamp = time.monotonic()
rc = self._sock.recv(bufsize)
to_read = bufsize - len(rc)
assert to_read >= 0
read_timeout = self.keep_alive
while to_read > 0:
recv = self._sock.recv(to_read)
to_read -= len(recv)
rc += recv
if time.monotonic() - stamp > read_timeout:
raise MMQTTException(
"Unable to receive {} bytes within {} seconds.".format(
to_read, read_timeout
)
)
return rc
def deinit(self):
self.disconnect()
@property
def mqtt_msg(self):
return self._msg_size_lim, MQTT_TOPIC_LENGTH_LIMIT
@mqtt_msg.setter
def mqtt_msg(self, msg_size):
if msg_size < MQTT_MSG_MAX_SZ:
self._msg_size_lim = msg_size
def will_set(self, topic=None, payload=None, qos=0, retain=False):
if self.logger:
self.logger.debug("Setting last will properties")
self._valid_qos(qos)
if self._is_connected:
raise MMQTTException("Last Will should only be called before connect().")
if payload is None:
payload = ""
if isinstance(payload, (int, float, str)):
payload = str(payload).encode()
else:
raise MMQTTException("Invalid message data type.")
self._lw_qos = qos
self._lw_topic = topic
self._lw_msg = payload
self._lw_retain = retain
def add_topic_callback(self, mqtt_topic, callback_method):
if mqtt_topic is None or callback_method is None:
raise ValueError("MQTT topic and callback method must both be defined.")
self._on_message_filtered[mqtt_topic] = callback_method
def remove_topic_callback(self, mqtt_topic):
if mqtt_topic is None:
raise ValueError("MQTT Topic must be defined.")
try:
del self._on_message_filtered[mqtt_topic]
except KeyError:
raise KeyError(
"MQTT topic callback not added with add_topic_callback."
) from None
@property
def on_message(self):
return self._on_message
@on_message.setter
def on_message(self, method):
self._on_message = method
def _handle_on_message(self, client, topic, message):
matched = False
if topic is not None:
for callback in self._on_message_filtered.iter_match(topic):
callback(client, topic, message) # on_msg with callback
matched = True
if not matched and self.on_message: # regular on_message
self.on_message(client, topic, message)
def username_pw_set(self, username, password=None):
if self._is_connected:
raise MMQTTException("This method must be called before connect().")
self._username = username
if password is not None:
self._password = password
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
def connect(self, clean_session=True, host=None, port=None, keep_alive=None):
if host:
self.broker = host
if port:
self.port = port
if keep_alive:
self.keep_alive = keep_alive
if self.logger:
self.logger.debug("Attempting to establish MQTT connection...")
# Get a new socket
self._sock = self._get_connect_socket(self.broker, self.port)
# Fixed Header
fixed_header = bytearray([0x10])
# NOTE: Variable header is
# MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0")
# because final 4 bytes are 4, 2, 0, 0
var_header = MQTT_HDR_CONNECT
var_header[6] = clean_session << 1
# Set up variable header and remaining_length
remaining_length = 12 + len(self.client_id)
if self._username:
remaining_length += 2 + len(self._username) + 2 + len(self._password)
var_header[6] |= 0xC0
if self.keep_alive:
assert self.keep_alive < MQTT_TOPIC_LENGTH_LIMIT
var_header[7] |= self.keep_alive >> 8
var_header[8] |= self.keep_alive & 0x00FF
if self._lw_topic:
remaining_length += 2 + len(self._lw_topic) + 2 + len(self._lw_msg)
var_header[6] |= 0x4 | (self._lw_qos & 0x1) << 3 | (self._lw_qos & 0x2) << 3
var_header[6] |= self._lw_retain << 5
# Remaining length calculation
large_rel_length = False
if remaining_length > 0x7F:
large_rel_length = True
# Calculate Remaining Length [2.2.3]
while remaining_length > 0:
encoded_byte = remaining_length % 0x80
remaining_length = remaining_length // 0x80
# if there is more data to encode, set the top bit of the byte
if remaining_length > 0:
encoded_byte |= 0x80
fixed_header.append(encoded_byte)
if large_rel_length:
fixed_header.append(0x00)
else:
fixed_header.append(remaining_length)
fixed_header.append(0x00)
if self.logger:
self.logger.debug("Sending CONNECT to broker...")
self.logger.debug(
"Fixed Header: %s\nVariable Header: %s", fixed_header, var_header
)
self._sock.send(fixed_header)
self._sock.send(var_header)
# [MQTT-3.1.3-4]
self._send_str(self.client_id)
if self._lw_topic:
# [MQTT-3.1.3-11]
self._send_str(self._lw_topic)
self._send_str(self._lw_msg)
        if self._username is not None:
            self._send_str(self._username)
            self._send_str(self._password)
if self.logger:
self.logger.debug("Receiving CONNACK packet from broker")
while True:
op = self._wait_for_msg()
if op == 32:
rc = self._sock_exact_recv(3)
assert rc[0] == 0x02
if rc[2] != 0x00:
raise MMQTTException(CONNACK_ERRORS[rc[2]])
self._is_connected = True
result = rc[0] & 1
if self.on_connect is not None:
self.on_connect(self, self._user_data, result, rc[2])
return result
def disconnect(self):
self.is_connected()
if self.logger is not None:
self.logger.debug("Sending DISCONNECT packet to broker")
try:
self._sock.send(MQTT_DISCONNECT)
except RuntimeError as e:
if self.logger:
self.logger.warning("Unable to send DISCONNECT packet: {}".format(e))
if self.logger is not None:
self.logger.debug("Closing socket")
self._sock.close()
self._is_connected = False
self._subscribed_topics = []
if self.on_disconnect is not None:
self.on_disconnect(self, self._user_data, 0)
def ping(self):
self.is_connected()
if self.logger:
self.logger.debug("Sending PINGREQ")
self._sock.send(MQTT_PINGREQ)
ping_timeout = self.keep_alive
stamp = time.monotonic()
rc, rcs = None, []
while rc != MQTT_PINGRESP:
rc = self._wait_for_msg()
if rc:
rcs.append(rc)
if time.monotonic() - stamp > ping_timeout:
raise MMQTTException("PINGRESP not returned from broker.")
return rcs
# pylint: disable=too-many-branches, too-many-statements
def publish(self, topic, msg, retain=False, qos=0):
self.is_connected()
self._valid_topic(topic)
if "+" in topic or "#" in topic:
raise MMQTTException("Publish topic can not contain wildcards.")
# check msg/qos kwargs
if msg is None:
raise MMQTTException("Message can not be None.")
if isinstance(msg, (int, float)):
msg = str(msg).encode("ascii")
elif isinstance(msg, str):
msg = str(msg).encode("utf-8")
elif isinstance(msg, bytes):
pass
else:
raise MMQTTException("Invalid message data type.")
if len(msg) > MQTT_MSG_MAX_SZ:
raise MMQTTException("Message size larger than %d bytes." % MQTT_MSG_MAX_SZ)
assert (
0 <= qos <= 1
), "Quality of Service Level 2 is unsupported by this library."
# fixed header. [3.3.1.2], [3.3.1.3]
pub_hdr_fixed = bytearray([0x30 | retain | qos << 1])
# variable header = 2-byte Topic length (big endian)
pub_hdr_var = bytearray(struct.pack(">H", len(topic)))
pub_hdr_var.extend(topic.encode("utf-8")) # Topic name
remaining_length = 2 + len(msg) + len(topic)
if qos > 0:
# packet identifier where QoS level is 1 or 2. [3.3.2.2]
remaining_length += 2
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
pub_hdr_var.append(self._pid >> 8)
pub_hdr_var.append(self._pid & 0xFF)
# Calculate remaining length [2.2.3]
if remaining_length > 0x7F:
while remaining_length > 0:
encoded_byte = remaining_length % 0x80
remaining_length = remaining_length // 0x80
if remaining_length > 0:
encoded_byte |= 0x80
pub_hdr_fixed.append(encoded_byte)
else:
pub_hdr_fixed.append(remaining_length)
if self.logger:
self.logger.debug(
"Sending PUBLISH\nTopic: %s\nMsg: %s\
\nQoS: %d\nRetain? %r",
topic,
msg,
qos,
retain,
)
self._sock.send(pub_hdr_fixed)
self._sock.send(pub_hdr_var)
self._sock.send(msg)
if qos == 0 and self.on_publish is not None:
self.on_publish(self, self._user_data, topic, self._pid)
if qos == 1:
while True:
op = self._wait_for_msg()
if op == 0x40:
sz = self._sock_exact_recv(1)
assert sz == b"\x02"
rcv_pid = self._sock_exact_recv(2)
rcv_pid = rcv_pid[0] << 0x08 | rcv_pid[1]
if self._pid == rcv_pid:
if self.on_publish is not None:
self.on_publish(self, self._user_data, topic, rcv_pid)
return
def subscribe(self, topic, qos=0):
self.is_connected()
topics = None
if isinstance(topic, tuple):
topic, qos = topic
self._valid_topic(topic)
self._valid_qos(qos)
if isinstance(topic, str):
self._valid_topic(topic)
self._valid_qos(qos)
topics = [(topic, qos)]
if isinstance(topic, list):
topics = []
for t, q in topic:
self._valid_qos(q)
self._valid_topic(t)
topics.append((t, q))
# Assemble packet
packet_length = 2 + (2 * len(topics)) + (1 * len(topics))
packet_length += sum(len(topic) for topic, qos in topics)
packet_length_byte = packet_length.to_bytes(1, "big")
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
packet_id_bytes = self._pid.to_bytes(2, "big")
# Packet with variable and fixed headers
packet = MQTT_SUB + packet_length_byte + packet_id_bytes
# attaching topic and QOS level to the packet
for t, q in topics:
topic_size = len(t).to_bytes(2, "big")
qos_byte = q.to_bytes(1, "big")
packet += topic_size + t.encode() + qos_byte
if self.logger:
for t, q in topics:
self.logger.debug("SUBSCRIBING to topic %s with QoS %d", t, q)
self._sock.send(packet)
while True:
op = self._wait_for_msg()
if op == 0x90:
rc = self._sock_exact_recv(4)
assert rc[1] == packet[2] and rc[2] == packet[3]
if rc[3] == 0x80:
raise MMQTTException("SUBACK Failure!")
for t, q in topics:
if self.on_subscribe is not None:
self.on_subscribe(self, self._user_data, t, q)
self._subscribed_topics.append(t)
return
def unsubscribe(self, topic):
topics = None
if isinstance(topic, str):
self._valid_topic(topic)
topics = [(topic)]
if isinstance(topic, list):
topics = []
for t in topic:
self._valid_topic(t)
topics.append((t))
for t in topics:
if t not in self._subscribed_topics:
raise MMQTTException(
"Topic must be subscribed to before attempting unsubscribe."
)
# Assemble packet
packet_length = 2 + (2 * len(topics))
packet_length += sum(len(topic) for topic in topics)
packet_length_byte = packet_length.to_bytes(1, "big")
self._pid = self._pid + 1 if self._pid < 0xFFFF else 1
packet_id_bytes = self._pid.to_bytes(2, "big")
packet = MQTT_UNSUB + packet_length_byte + packet_id_bytes
for t in topics:
topic_size = len(t).to_bytes(2, "big")
packet += topic_size + t.encode()
if self.logger:
for t in topics:
self.logger.debug("UNSUBSCRIBING from topic %s", t)
self._sock.send(packet)
if self.logger:
self.logger.debug("Waiting for UNSUBACK...")
while True:
op = self._wait_for_msg()
if op == 176:
rc = self._sock_exact_recv(3)
assert rc[0] == 0x02
# [MQTT-3.32]
assert rc[1] == packet_id_bytes[0] and rc[2] == packet_id_bytes[1]
for t in topics:
if self.on_unsubscribe is not None:
self.on_unsubscribe(self, self._user_data, t, self._pid)
self._subscribed_topics.remove(t)
return
def reconnect(self, resub_topics=True):
if self.logger:
self.logger.debug("Attempting to reconnect with MQTT broker")
self.connect()
if self.logger:
self.logger.debug("Reconnected with broker")
if resub_topics:
if self.logger:
self.logger.debug(
"Attempting to resubscribe to previously subscribed topics."
)
subscribed_topics = self._subscribed_topics.copy()
self._subscribed_topics = []
while subscribed_topics:
feed = subscribed_topics.pop()
self.subscribe(feed)
def loop(self, timeout=1):
if self._timestamp == 0:
self._timestamp = time.monotonic()
current_time = time.monotonic()
if current_time - self._timestamp >= self.keep_alive:
# Handle KeepAlive by expecting a PINGREQ/PINGRESP from the server
if self.logger is not None:
self.logger.debug(
"KeepAlive period elapsed - requesting a PINGRESP from the server..."
)
rcs = self.ping()
self._timestamp = 0
return rcs
self._sock.settimeout(timeout)
rc = self._wait_for_msg()
return [rc] if rc else None
def _wait_for_msg(self, timeout=0.1):
# CPython socket module contains a timeout attribute
if hasattr(self._socket_pool, "timeout"):
try:
res = self._sock_exact_recv(1)
except self._socket_pool.timeout as error:
return None
else: # socketpool, esp32spi
try:
res = self._sock_exact_recv(1)
except OSError as error:
if error.errno == errno.ETIMEDOUT:
# raised by a socket timeout if 0 bytes were present
return None
raise MMQTTException from error
# Block while we parse the rest of the response
self._sock.settimeout(timeout)
if res in [None, b""]:
# If we get here, it means that there is nothing to be received
return None
if res[0] == MQTT_PINGRESP:
if self.logger:
self.logger.debug("Got PINGRESP")
sz = self._sock_exact_recv(1)[0]
if sz != 0x00:
raise MMQTTException(
"Unexpected PINGRESP returned from broker: {}.".format(sz)
)
return MQTT_PINGRESP
if res[0] & 0xF0 != 0x30:
return res[0]
sz = self._recv_len()
# topic length MSB & LSB
topic_len = self._sock_exact_recv(2)
topic_len = (topic_len[0] << 8) | topic_len[1]
topic = self._sock_exact_recv(topic_len)
topic = str(topic, "utf-8")
sz -= topic_len + 2
pid = 0
if res[0] & 0x06:
pid = self._sock_exact_recv(2)
pid = pid[0] << 0x08 | pid[1]
sz -= 0x02
# read message contents
msg = self._sock_exact_recv(sz)
self._handle_on_message(self, topic, str(msg, "utf-8"))
if res[0] & 0x06 == 0x02:
pkt = bytearray(b"\x40\x02\0\0")
struct.pack_into("!H", pkt, 2, pid)
self._sock.send(pkt)
elif res[0] & 6 == 4:
assert 0
return res[0]
def _recv_len(self):
n = 0
sh = 0
b = bytearray(1)
while True:
b = self._sock_exact_recv(1)[0]
n |= (b & 0x7F) << sh
if not b & 0x80:
return n
sh += 7
def _recv_into(self, buf, size=0):
if self._backwards_compatible_sock:
size = len(buf) if size == 0 else size
b = self._sock.recv(size)
read_size = len(b)
buf[:read_size] = b
return read_size
return self._sock.recv_into(buf, size)
def _sock_exact_recv(self, bufsize):
if not self._backwards_compatible_sock:
# CPython/Socketpool Impl.
rc = bytearray(bufsize)
self._sock.recv_into(rc, bufsize)
else: # ESP32SPI Impl.
stamp = time.monotonic()
read_timeout = self.keep_alive
# This will timeout with socket timeout (not keepalive timeout)
rc = self._sock.recv(bufsize)
if not rc:
if self.logger:
self.logger.debug("_sock_exact_recv timeout")
# If no bytes waiting, raise same exception as socketpool
raise OSError(errno.ETIMEDOUT)
# If any bytes waiting, try to read them all,
# or raise exception if wait longer than read_timeout
to_read = bufsize - len(rc)
assert to_read >= 0
read_timeout = self.keep_alive
while to_read > 0:
recv = self._sock.recv(to_read)
to_read -= len(recv)
rc += recv
if time.monotonic() - stamp > read_timeout:
raise MMQTTException(
"Unable to receive {} bytes within {} seconds.".format(
to_read, read_timeout
)
)
return rc
def _send_str(self, string):
self._sock.send(struct.pack("!H", len(string)))
if isinstance(string, str):
self._sock.send(str.encode(string, "utf-8"))
else:
self._sock.send(string)
@staticmethod
def _valid_topic(topic):
if topic is None:
raise MMQTTException("Topic may not be NoneType")
# [MQTT-4.7.3-1]
if not topic:
raise MMQTTException("Topic may not be empty.")
# [MQTT-4.7.3-3]
if len(topic.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT:
raise MMQTTException("Topic length is too large.")
@staticmethod
def _valid_qos(qos_level):
if isinstance(qos_level, int):
if qos_level < 0 or qos_level > 2:
raise MMQTTException("QoS must be between 1 and 2.")
else:
raise MMQTTException("QoS must be an integer.")
def is_connected(self):
if self._sock is None or self._is_connected is False:
raise MMQTTException("MiniMQTT is not connected.")
return self._is_connected
# Logging
def enable_logger(self, logger, log_level=20):
self.logger = logger.getLogger("log")
self.logger.setLevel(log_level)
def disable_logger(self):
if not self.logger:
raise MMQTTException("Can not disable logger, no logger found.")
self.logger = None
| true | true |