hexsha (stringlengths 40-40) | size (int64 2-1.02M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, ⌀) | max_issues_repo_path (stringlengths 4-245) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, ⌀) | max_forks_repo_path (stringlengths 4-245) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, ⌀) | content (stringlengths 2-1.02M) | avg_line_length (float64 1-958k) | max_line_length (int64 1-987k) | alphanum_fraction (float64 0-1) | content_no_comment (stringlengths 0-1.01M) | is_comment_constant_removed (bool 2 classes) | is_sharp_comment_removed (bool 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c49844b5764b12e0b5ad75cf890bacc50de35c9 | 16,557 | py | Python | tests/core/test_lightning_optimizer.py | aribornstein/pytorch-lightning | ca68cac57ad8eefc9b477ee126eb42a483f27a39 | [
"Apache-2.0"
] | 1 | 2021-01-18T06:31:43.000Z | 2021-01-18T06:31:43.000Z | tests/core/test_lightning_optimizer.py | aribornstein/pytorch-lightning | ca68cac57ad8eefc9b477ee126eb42a483f27a39 | [
"Apache-2.0"
] | 8 | 2020-10-27T22:39:24.000Z | 2021-01-24T16:41:34.000Z | tests/core/test_lightning_optimizer.py | tarepan/pytorch-lightning | 0b7f5a88a0f4691ec228c4708295a10d403fd592 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam, Optimizer
import pytorch_lightning as pl
from pytorch_lightning import LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_utils import is_overridden
from tests.base.boring_model import BoringModel, RandomDataset, RandomDictDataset, RandomDictStringDataset
def test_lightning_optimizer(tmpdir):
"""
    Test that optimizers are correctly wrapped by our LightningOptimizer
"""
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
# optimizer = LightningOptimizer(self.trainer, optimizer)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'dampening': 0, 'initial_lr': 0.1, 'lr': 0.01, 'momentum': 0, 'nesterov': False, 'weight_decay': 0}"
expected = f"LightningSGD(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
def test_lightning_optimizer_from_user(tmpdir):
"""
Test that the user can use our LightningOptimizer. Not recommended.
"""
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer = LightningOptimizer(optimizer)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'amsgrad': False, 'betas': (0.9, 0.999), 'eps': 1e-08, 'initial_lr': 0.1, 'lr': 0.01, 'weight_decay': 0}"
expected = f"LightningAdam(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step, tmpdir):
"""
Test that the user can use our LightningOptimizer. Not recommended for now.
"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 8
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_sgd_step, mock_adam_step, tmpdir):
"""
Test that the user can use our LightningOptimizer. Not recommended.
"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=2,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 4
def test_state(tmpdir):
model = torch.nn.Linear(3, 4)
optimizer = torch.optim.Adam(model.parameters())
lightning_optimizer = LightningOptimizer(optimizer)
# test state
assert optimizer.state == lightning_optimizer.state
lightning_optimizer.state = optimizer.state
assert optimizer.state == lightning_optimizer.state
# test param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
lightning_optimizer.param_groups = optimizer.param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
# test defaults
assert optimizer.defaults == lightning_optimizer.defaults
lightning_optimizer.defaults = optimizer.defaults
assert optimizer.defaults == lightning_optimizer.defaults
assert isinstance(lightning_optimizer, LightningOptimizer)
assert isinstance(lightning_optimizer, Adam)
assert isinstance(lightning_optimizer, Optimizer)
lightning_dict = {}
special_attrs = ["_accumulate_grad_batches", "_optimizer", "_optimizer_idx", "_support_closure",
"_trainer", "__getstate__", "__setstate__", "state_dict", "load_state_dict",
"zero_grad", "__setstate__", "add_param_group"]
for k, v in lightning_optimizer.__dict__.items():
if k not in special_attrs:
lightning_dict[k] = v
assert lightning_dict == optimizer.__dict__
assert optimizer.state_dict() == lightning_optimizer.state_dict()
assert optimizer.state == lightning_optimizer.state
def test_lightning_optimizer_automatic_optimization(tmpdir):
"""
    Test that LightningOptimizer works with make_optimizer_step in automatic_optimization
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 2 == 0)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
"""
    Test that LightningOptimizer works with optimizer_zero_grad overrides in automatic_optimization
"""
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 2
assert sgd_zero_grad.call_count == 5
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_optimizer_step(tmpdir):
"""
    Test that LightningOptimizer works with optimizer_zero_grad overrides and make_optimizer_step in automatic_optimization
"""
try:
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
if optimizer_idx == 0:
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 3 == 0)
return
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 4
assert sgd_zero_grad.call_count == 10
except MisconfigurationException as e:
assert "When overriding LightningModule `optimizer_zero_grad`, make_optimizer_step is not allowed" in str(e)
def test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir):
"""
    Test that LightningOptimizer works with make_optimizer_step in automatic_optimization
"""
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
make_optimizer_step = None
if optimizer_idx == 0:
make_optimizer_step = batch_idx % 4 == 0
optimizer.step(closure=optimizer_closure, make_optimizer_step=make_optimizer_step)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 20
assert sgd_zero_grad.call_count == 5
| 38.684579 | 120 | 0.643474 | import os
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam, Optimizer
import pytorch_lightning as pl
from pytorch_lightning import LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_utils import is_overridden
from tests.base.boring_model import BoringModel, RandomDataset, RandomDictDataset, RandomDictStringDataset
def test_lightning_optimizer(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'dampening': 0, 'initial_lr': 0.1, 'lr': 0.01, 'momentum': 0, 'nesterov': False, 'weight_decay': 0}"
expected = f"LightningSGD(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
def test_lightning_optimizer_from_user(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer = LightningOptimizer(optimizer)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'amsgrad': False, 'betas': (0.9, 0.999), 'eps': 1e-08, 'initial_lr': 0.1, 'lr': 0.01, 'weight_decay': 0}"
expected = f"LightningAdam(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step, tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 8
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_sgd_step, mock_adam_step, tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=2,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 4
def test_state(tmpdir):
model = torch.nn.Linear(3, 4)
optimizer = torch.optim.Adam(model.parameters())
lightning_optimizer = LightningOptimizer(optimizer)
assert optimizer.state == lightning_optimizer.state
lightning_optimizer.state = optimizer.state
assert optimizer.state == lightning_optimizer.state
assert optimizer.param_groups == lightning_optimizer.param_groups
lightning_optimizer.param_groups = optimizer.param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
assert optimizer.defaults == lightning_optimizer.defaults
lightning_optimizer.defaults = optimizer.defaults
assert optimizer.defaults == lightning_optimizer.defaults
assert isinstance(lightning_optimizer, LightningOptimizer)
assert isinstance(lightning_optimizer, Adam)
assert isinstance(lightning_optimizer, Optimizer)
lightning_dict = {}
special_attrs = ["_accumulate_grad_batches", "_optimizer", "_optimizer_idx", "_support_closure",
"_trainer", "__getstate__", "__setstate__", "state_dict", "load_state_dict",
"zero_grad", "__setstate__", "add_param_group"]
for k, v in lightning_optimizer.__dict__.items():
if k not in special_attrs:
lightning_dict[k] = v
assert lightning_dict == optimizer.__dict__
assert optimizer.state_dict() == lightning_optimizer.state_dict()
assert optimizer.state == lightning_optimizer.state
def test_lightning_optimizer_automatic_optimization(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 2 == 0)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 2
assert sgd_zero_grad.call_count == 5
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_optimizer_step(tmpdir):
try:
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
if optimizer_idx == 0:
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 3 == 0)
return
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 4
assert sgd_zero_grad.call_count == 10
except MisconfigurationException as e:
assert "When overriding LightningModule `optimizer_zero_grad`, make_optimizer_step is not allowed" in str(e)
def test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir):
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
make_optimizer_step = None
if optimizer_idx == 0:
make_optimizer_step = batch_idx % 4 == 0
optimizer.step(closure=optimizer_closure, make_optimizer_step=make_optimizer_step)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 20
assert sgd_zero_grad.call_count == 5
| true | true |
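The manual-optimization tests above rely on the optimizer-closure pattern: opt.step(closure=...) lets the optimizer re-evaluate the loss and gradients itself. A minimal sketch of that pattern in plain PyTorch, with no Lightning involved; the model and batch shapes are illustrative:

import torch

model = torch.nn.Linear(32, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batch = torch.randn(4, 32)

def closure():
    optimizer.zero_grad()        # clear stale gradients before recomputing
    loss = model(batch).sum()    # forward pass
    loss.backward()              # populate .grad on the parameters
    return loss

optimizer.step(closure)          # step() invokes closure() before the update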
1c49853e566203e2d86ea511f9e25cee8a9845fb | 2,074 | py | Python | test/pycore/schema_gen.py | iGeeky/open-account | 8e1329cddcb97517a841f3d98786ba4d76065e2b | [
"MIT"
] | 10 | 2021-01-17T14:12:01.000Z | 2021-07-12T07:29:29.000Z | test/pycore/schema_gen.py | iGeeky/open-account | 8e1329cddcb97517a841f3d98786ba4d76065e2b | [
"MIT"
] | null | null | null | test/pycore/schema_gen.py | iGeeky/open-account | 8e1329cddcb97517a841f3d98786ba4d76065e2b | [
"MIT"
] | 1 | 2022-01-02T15:18:40.000Z | 2022-01-02T15:18:40.000Z | # coding=utf8
def get_type(value):
t = type(value)
if t == dict:
t = 'object'
elif t == list:
t = 'array'
    elif value is None:
t = 'null'
elif t == str:
t = 'string'
elif t == int:
t = 'integer'
elif t == float:
t = 'number'
elif t == bool:
t = 'boolean'
else:
        t = 'unknown'
return t
def generate_schema(field, value, **opts):
t = get_type(value)
schema = { "type": t }
opts = opts or {}
enums = opts.get("enums", False)
forceEnumFields = opts.get("forceEnumFields", {})
deep = opts.get("deep", 10)
curLevel = opts.get("curLevel", 0)
level = curLevel + 1
opts["curLevel"] = level
if t == 'object':
if level <= deep:
properties = {}
required = []
subFields = value.keys()
for subField in subFields:
childValue = value[subField]
properties[subField] = generate_schema(subField, childValue, **opts.copy())
required.append(subField)
schema["properties"] = properties
schema["required"] = required
elif t == 'array':
if level <= deep and len(value) > 0:
schema["items"] = generate_schema(None, value[0], **opts.copy())
elif t == 'number' or t == 'float' or t == 'string' or t == 'integer' or t == 'boolean':
        if enums or (field and forceEnumFields and forceEnumFields.get(field)):
schema["enum"] = [value]
    elif t == 'null':  # do not auto-generate a type for null; asserting null is error-prone.
        del schema["type"]
else:
raise BaseException('UnKnown type:%s, value:%s' % (t, value))
return schema
def auto_schema(value, **opts):
return generate_schema(None, value, **opts)
def set_schema_enums(schema, enums):
for field in enums:
field_schema = schema.get(field)
if field_schema:
enum_value = enums[field]
if type(enum_value) != list:
enum_value = [enum_value]
field_schema["enum"] = enum_value
| 28.805556 | 92 | 0.540501 |
def get_type(value):
t = type(value)
if t == dict:
t = 'object'
elif t == list:
t = 'array'
    elif value is None:
t = 'null'
elif t == str:
t = 'string'
elif t == int:
t = 'integer'
elif t == float:
t = 'number'
elif t == bool:
t = 'boolean'
else:
        t = 'unknown'
return t
def generate_schema(field, value, **opts):
t = get_type(value)
schema = { "type": t }
opts = opts or {}
enums = opts.get("enums", False)
forceEnumFields = opts.get("forceEnumFields", {})
deep = opts.get("deep", 10)
curLevel = opts.get("curLevel", 0)
level = curLevel + 1
opts["curLevel"] = level
if t == 'object':
if level <= deep:
properties = {}
required = []
subFields = value.keys()
for subField in subFields:
childValue = value[subField]
properties[subField] = generate_schema(subField, childValue, **opts.copy())
required.append(subField)
schema["properties"] = properties
schema["required"] = required
elif t == 'array':
if level <= deep and len(value) > 0:
schema["items"] = generate_schema(None, value[0], **opts.copy())
elif t == 'number' or t == 'float' or t == 'string' or t == 'integer' or t == 'boolean':
        if enums or (field and forceEnumFields and forceEnumFields.get(field)):
schema["enum"] = [value]
    elif t == 'null':
        del schema["type"]
else:
raise BaseException('UnKnown type:%s, value:%s' % (t, value))
return schema
def auto_schema(value, **opts):
return generate_schema(None, value, **opts)
def set_schema_enums(schema, enums):
for field in enums:
field_schema = schema.get(field)
if field_schema:
enum_value = enums[field]
if type(enum_value) != list:
enum_value = [enum_value]
field_schema["enum"] = enum_value
| true | true |
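A hypothetical usage sketch of the auto_schema/set_schema_enums helpers defined above; the sample payload is invented:

sample = {"code": 0, "msg": "ok", "data": {"items": [{"id": 1, "name": "a"}]}}
schema = auto_schema(sample, deep=5)
# schema is {"type": "object", "properties": {...}, "required": [...]};
# nested dicts/lists are typed recursively, arrays from their first element.
set_schema_enums(schema["properties"], {"code": 0})
# pins "code" to enum [0] so a validator rejects non-zero response codes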
1c4986f6dbd679f80bc76138d54275a1e2e4f850 | 1,898 | py | Python | nova/api/openstack/compute/views/addresses.py | hemanthnakkina/nova | 3756f4ffa6ff670bfd6b491a12b833da0a36b017 | [
"Apache-2.0"
] | 2 | 2021-10-11T04:56:25.000Z | 2022-02-16T08:49:29.000Z | nova/api/openstack/compute/views/addresses.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/api/openstack/compute/views/addresses.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 8 | 2017-03-27T07:50:38.000Z | 2020-02-14T16:55:56.000Z | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from nova.api.openstack import common
class ViewBuilder(common.ViewBuilder):
"""Models server addresses as a dictionary."""
_collection_name = "addresses"
def basic(self, ip, extend_address=False):
"""Return a dictionary describing an IP address."""
address = {
"version": ip["version"],
"addr": ip["address"],
}
if extend_address:
address.update({
"OS-EXT-IPS:type": ip["type"],
"OS-EXT-IPS-MAC:mac_addr": ip['mac_address'],
})
return address
def show(self, network, label, extend_address=False):
"""Returns a dictionary describing a network."""
all_ips = itertools.chain(network["ips"], network["floating_ips"])
return {label: [self.basic(ip, extend_address) for ip in all_ips]}
def index(self, networks, extend_address=False):
"""Return a dictionary describing a list of networks."""
addresses = collections.OrderedDict()
for label, network in networks.items():
network_dict = self.show(network, label, extend_address)
addresses[label] = network_dict[label]
return dict(addresses=addresses)
| 36.5 | 78 | 0.658061 |
import collections
import itertools
from nova.api.openstack import common
class ViewBuilder(common.ViewBuilder):
_collection_name = "addresses"
def basic(self, ip, extend_address=False):
address = {
"version": ip["version"],
"addr": ip["address"],
}
if extend_address:
address.update({
"OS-EXT-IPS:type": ip["type"],
"OS-EXT-IPS-MAC:mac_addr": ip['mac_address'],
})
return address
def show(self, network, label, extend_address=False):
all_ips = itertools.chain(network["ips"], network["floating_ips"])
return {label: [self.basic(ip, extend_address) for ip in all_ips]}
def index(self, networks, extend_address=False):
addresses = collections.OrderedDict()
for label, network in networks.items():
network_dict = self.show(network, label, extend_address)
addresses[label] = network_dict[label]
return dict(addresses=addresses)
| true | true |
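A sketch of the input shape index() expects, inferred from show()/basic() above; it assumes the base common.ViewBuilder needs no constructor arguments:

builder = ViewBuilder()
networks = {
    "private": {
        "ips": [{"version": 4, "address": "10.0.0.3",
                 "type": "fixed", "mac_address": "aa:bb:cc:dd:ee:ff"}],
        "floating_ips": [],
    },
}
result = builder.index(networks, extend_address=True)
# {'addresses': {'private': [{'version': 4, 'addr': '10.0.0.3',
#   'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:bb:cc:dd:ee:ff'}]}}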
1c49873014937e8fca246eefd2074eda95180dec | 4,361 | py | Python | examples/task_manager_plugin/task_manager_plugin_app.py | pxlc/PyWebEngineGui | 12391f78e3708a7f61154331a01a193630f8f2e4 | [
"MIT"
] | 1 | 2021-11-09T07:51:09.000Z | 2021-11-09T07:51:09.000Z | examples/task_manager_plugin/task_manager_plugin_app.py | pxlc/PyWebEngineGui | 12391f78e3708a7f61154331a01a193630f8f2e4 | [
"MIT"
] | null | null | null | examples/task_manager_plugin/task_manager_plugin_app.py | pxlc/PyWebEngineGui | 12391f78e3708a7f61154331a01a193630f8f2e4 | [
"MIT"
] | 1 | 2022-03-29T09:01:18.000Z | 2022-03-29T09:01:18.000Z | # -------------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 pxlc@github
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------------------
import os
import sys
import json
import logging
from PyWebEngineGui.pweg import WebEngineDialogBase, register_op, launch_main_app
from directory_listing_task import directory_listing_task_validation
from directory_listing_task import directory_listing_task
class TaskManagerPluginApp(WebEngineDialogBase):
def __init__(self, parent=None, html_filepath='', app_title='', width=500, height=200,
log_level_str='INFO', log_to_shell=True, is_modal_dialog=True):
NEEDED_PLUGINS = ['TaskManager']
super(TaskManagerPluginApp, self).__init__(parent=parent, app_module_path=os.path.abspath(__file__),
html_filepath='', app_title=app_title,
width=width, height=height,
requested_plugins_list=NEEDED_PLUGINS,
override_session_log_filepath='',
log_level_str=log_level_str,
log_to_shell=log_to_shell,
is_modal_dialog=is_modal_dialog)
task_plugin = self.get_plugin_instance('TaskManager')
task_plugin.setup_task('DirectoryListing', directory_listing_task_validation,
directory_listing_task)
# --------------------------------------------------------------------------------------------------------
# "setup_extra_template_vars()" is a REQUIRED override method
#
# Establish any values for template vars in this method that you need to use in your HTML template file.
# --------------------------------------------------------------------------------------------------------
def setup_extra_template_vars(self):
return {
'APP_HEADER': '%s Example App' % self.get_app_title(),
}
# --------------------------------------------------------------------------------------------------------
# Register any callback op handler methods in this way ...
#
# @register_op
# def my_op_handler(self, op_data):
# # op_data is data dict received from JavaScript side
# for op_data_key in sorted(op_data.keys()):
# self.info(' %s = %s' % (op_data_key, op_data[op_data_key]))
#
# NOTE: DO NOT register an op handler method named "print_message" (that is a default one
# provided by the base class)
# --------------------------------------------------------------------------------------------------------
@register_op
def test_one_js_click(self, op_data):
self.info('')
self.info(':: got op "test_one_js_click" with data "{0}"'.format(op_data))
self.info('')
self.send_to_webbrowser('test_one', {'x': 999, 'y': 808, 'z': 345})
if __name__ == '__main__':
sys.exit(launch_main_app(TaskManagerPluginApp, app_title='Task Manager Example', width=600, height=400))
| 44.958763 | 110 | 0.561798 |
import os
import sys
import json
import logging
from PyWebEngineGui.pweg import WebEngineDialogBase, register_op, launch_main_app
from directory_listing_task import directory_listing_task_validation
from directory_listing_task import directory_listing_task
class TaskManagerPluginApp(WebEngineDialogBase):
def __init__(self, parent=None, html_filepath='', app_title='', width=500, height=200,
log_level_str='INFO', log_to_shell=True, is_modal_dialog=True):
NEEDED_PLUGINS = ['TaskManager']
super(TaskManagerPluginApp, self).__init__(parent=parent, app_module_path=os.path.abspath(__file__),
html_filepath='', app_title=app_title,
width=width, height=height,
requested_plugins_list=NEEDED_PLUGINS,
override_session_log_filepath='',
log_level_str=log_level_str,
log_to_shell=log_to_shell,
is_modal_dialog=is_modal_dialog)
task_plugin = self.get_plugin_instance('TaskManager')
task_plugin.setup_task('DirectoryListing', directory_listing_task_validation,
directory_listing_task)
def setup_extra_template_vars(self):
return {
'APP_HEADER': '%s Example App' % self.get_app_title(),
}
@register_op
def test_one_js_click(self, op_data):
self.info('')
self.info(':: got op "test_one_js_click" with data "{0}"'.format(op_data))
self.info('')
self.send_to_webbrowser('test_one', {'x': 999, 'y': 808, 'z': 345})
if __name__ == '__main__':
sys.exit(launch_main_app(TaskManagerPluginApp, app_title='Task Manager Example', width=600, height=400))
| true | true |
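A hypothetical sketch of registering an additional op handler on the dialog above, following the pattern the class's own comments describe; the op name and payload are invented:

class MyDialog(TaskManagerPluginApp):

    @register_op
    def refresh_clicked(self, op_data):
        # op_data is the dict sent from the JavaScript side
        self.info('refresh requested with: %s' % op_data)
        self.send_to_webbrowser('refresh_done', {'ok': True})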
1c4987385c9c55d21a2f9a9cc2cc5c8df95c269c | 829 | py | Python | python/qitoolchain/actions/list.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | python/qitoolchain/actions/list.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | python/qitoolchain/actions/list.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
"""Display the toolchains names.
"""
from qisys import ui
import qisys.worktree
import qisys.parsers
import qitoolchain
def configure_parser(parser):
"""Configure parser for this action """
qisys.parsers.default_parser(parser)
def do(args): # pylint: disable=unused-argument
""" Main method """
tc_names = qitoolchain.get_tc_names()
if not tc_names:
ui.info("No toolchain yet", "\n",
"Use `qitoolchain create` to create a new toolchain")
return
ui.info("Known toolchains:")
for tc_name in tc_names:
ui.info("*", tc_name)
ui.info("Use ``qitoolchain info <tc_name>`` for more info")
| 26.741935 | 72 | 0.679131 |
from qisys import ui
import qisys.worktree
import qisys.parsers
import qitoolchain
def configure_parser(parser):
qisys.parsers.default_parser(parser)
def do(args):
    tc_names = qitoolchain.get_tc_names()
if not tc_names:
ui.info("No toolchain yet", "\n",
"Use `qitoolchain create` to create a new toolchain")
return
ui.info("Known toolchains:")
for tc_name in tc_names:
ui.info("*", tc_name)
ui.info("Use ``qitoolchain info <tc_name>`` for more info")
| true | true |
1c4987b52231b64a6534695e6c3a883f0c14cd41 | 2,952 | py | Python | ospt/utils.py | Murray-LIANG/ospt | c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b | [
"Apache-2.0"
] | null | null | null | ospt/utils.py | Murray-LIANG/ospt | c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b | [
"Apache-2.0"
] | null | null | null | ospt/utils.py | Murray-LIANG/ospt | c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b | [
"Apache-2.0"
] | null | null | null | import functools
import inspect
import logging
import time
from contextlib import contextmanager
from logging import handlers
from ospt import exceptions as ospt_ex
LOG = logging.getLogger()
def setup_log(file_path=None, level=logging.INFO, to_stdout=True,
max_bytes=104857600, max_file_count=5):
fmt_str = ('%(asctime)-15s %(name)-8s %(threadName)s '
'%(levelname)-4s %(message)s')
fmt = logging.Formatter(fmt_str)
    # Set the root logger to `level`; otherwise it defaults to WARNING,
    # which would suppress all lower-severity log records.
root = logging.getLogger()
root.setLevel(level)
if to_stdout:
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(fmt)
root.addHandler(console)
if file_path:
file_handler = handlers.RotatingFileHandler(
filename=file_path, maxBytes=max_bytes, backupCount=max_file_count)
file_handler.setLevel(level)
file_handler.setFormatter(fmt)
root.addHandler(file_handler)
@contextmanager
def timer():
class _Time(object):
def __init__(self, time_start):
self.start = time_start
self.end = None
@property
def interval(self):
return self.end - self.start
_timer = _Time(time.time())
try:
yield _timer
finally:
_timer.end = time.time()
def to_str(resource):
if isinstance(resource, list) or isinstance(resource, tuple):
return ':'.join(to_str(each) for each in resource)
from ospt.control import Resource as OsptRes
if isinstance(resource, OsptRes):
return str(resource)
from storops.lib.resource import Resource as StoropsRes
if isinstance(resource, StoropsRes):
return 'id={},name={}'.format(resource.get_id(), resource.name)
return str(resource)
def timeit(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
LOG.info('%s: %s.', func.__name__, to_str(args))
with timer() as t:
result = func(*args, **kwargs)
LOG.info('TIME: %s, %s: %s.', t.interval, func.__name__, to_str(args))
return result
return _wrapper
def wait_until(res_manager, res_id, criteria, timeout=1200):
start_point = time.time()
while True:
if time.time() - start_point > timeout:
raise ospt_ex.TimeoutError(
'Timeout before {} becoming {}. {} sec passed.'.format(
res_id, criteria, timeout))
time.sleep(1)
try:
res = res_manager.get(res_id)
except Exception as ex:
if inspect.isclass(criteria) and isinstance(ex, criteria):
                break
            continue  # unexpected error fetching the resource; retry until timeout
if res.status == criteria:
break
def sort_by_name(resources):
return sorted(resources, key=lambda x: x.name)
| 29.52 | 80 | 0.613144 | import functools
import inspect
import logging
import time
from contextlib import contextmanager
from logging import handlers
from ospt import exceptions as ospt_ex
LOG = logging.getLogger()
def setup_log(file_path=None, level=logging.INFO, to_stdout=True,
max_bytes=104857600, max_file_count=5):
fmt_str = ('%(asctime)-15s %(name)-8s %(threadName)s '
'%(levelname)-4s %(message)s')
fmt = logging.Formatter(fmt_str)
root = logging.getLogger()
root.setLevel(level)
if to_stdout:
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(fmt)
root.addHandler(console)
if file_path:
file_handler = handlers.RotatingFileHandler(
filename=file_path, maxBytes=max_bytes, backupCount=max_file_count)
file_handler.setLevel(level)
file_handler.setFormatter(fmt)
root.addHandler(file_handler)
@contextmanager
def timer():
class _Time(object):
def __init__(self, time_start):
self.start = time_start
self.end = None
@property
def interval(self):
return self.end - self.start
_timer = _Time(time.time())
try:
yield _timer
finally:
_timer.end = time.time()
def to_str(resource):
if isinstance(resource, list) or isinstance(resource, tuple):
return ':'.join(to_str(each) for each in resource)
from ospt.control import Resource as OsptRes
if isinstance(resource, OsptRes):
return str(resource)
from storops.lib.resource import Resource as StoropsRes
if isinstance(resource, StoropsRes):
return 'id={},name={}'.format(resource.get_id(), resource.name)
return str(resource)
def timeit(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
LOG.info('%s: %s.', func.__name__, to_str(args))
with timer() as t:
result = func(*args, **kwargs)
LOG.info('TIME: %s, %s: %s.', t.interval, func.__name__, to_str(args))
return result
return _wrapper
def wait_until(res_manager, res_id, criteria, timeout=1200):
start_point = time.time()
while True:
if time.time() - start_point > timeout:
raise ospt_ex.TimeoutError(
'Timeout before {} becoming {}. {} sec passed.'.format(
res_id, criteria, timeout))
time.sleep(1)
try:
res = res_manager.get(res_id)
except Exception as ex:
if inspect.isclass(criteria) and isinstance(ex, criteria):
                break
            continue
if res.status == criteria:
break
def sort_by_name(resources):
return sorted(resources, key=lambda x: x.name)
| true | true |
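Example usage of the timer() context manager and timeit decorator defined above; the sleeping function is illustrative:

@timeit
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

with timer() as t:
    slow_add(1, 2)
print('elapsed: %.3f s' % t.interval)  # roughly 0.1 s plus logging overhead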
1c4987dda02a4463a27ae5d6523d313400cc871d | 7,287 | py | Python | options/valuation.py | JuanCRCano/AmericanOpt_Methods | 38a4de4da20337e629ab47edf2d2e7e134586264 | [
"MIT"
] | null | null | null | options/valuation.py | JuanCRCano/AmericanOpt_Methods | 38a4de4da20337e629ab47edf2d2e7e134586264 | [
"MIT"
] | null | null | null | options/valuation.py | JuanCRCano/AmericanOpt_Methods | 38a4de4da20337e629ab47edf2d2e7e134586264 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import math as mt
from sklearn.linear_model import LinearRegression
def Binomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
Ramificaciones_Arbol=100, Modelo="Cox Equity"):
if Modelo == "Cox Equity":
ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
if Modelo == "Cox Futuros":
ConfigModelo = 0
if Modelo == "Cox Divisas":
ConfigModelo = TLibre_Riesgo - Tasa_Foranea
Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
Vencimiento = Vencimiento / 365.0
Steps = Vencimiento / Ramificaciones_Arbol
Up = mt.exp(Volatilidad * mt.sqrt(Steps))
Down = mt.exp(-Volatilidad * mt.sqrt(Steps))
P = (mt.exp(ConfigModelo * Steps) - Down) / (Up - Down)
    # Obtain the final branches of the binomial tree of underlying prices
Arbol_Subyacente[0, 0] = Spot
for i in range(1, Ramificaciones_Arbol + 1):
Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
for j in range(1, i + 1):
Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1] * Down
for j in range(Ramificaciones_Arbol + 1):
Arbol_Derivado[Ramificaciones_Arbol, j] = max(0,
Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike))
for m in range(Ramificaciones_Arbol + 1):
i = Ramificaciones_Arbol - m - 1
for j in range(i + 1):
Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike),
(P * Arbol_Derivado[i + 1, j] + (1 - P) * Arbol_Derivado[i + 1, j + 1]) * mt.exp(
-TLibre_Riesgo * Steps))
# return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,""),pd.DataFrame(Arbol_Derivado).replace(0,"")])
return Arbol_Derivado[0, 0]
def Trinomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
Ramificaciones_Arbol=100, Modelo="Cox Equity"):
if Modelo == "Cox Equity":
ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
if Modelo == "Cox Futuros":
ConfigModelo = 0
if Modelo == "Cox Divisas":
ConfigModelo = TLibre_Riesgo - Tasa_Foranea
Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
Vencimiento = Vencimiento / 365.0
Steps = Vencimiento / Ramificaciones_Arbol
Up = mt.exp(Volatilidad * mt.sqrt(2 * Steps))
Down = mt.exp(-Volatilidad * mt.sqrt(2 * Steps))
Pu = ((mt.exp(TLibre_Riesgo * Steps / 2) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2))) / (
mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
Pd = ((mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(TLibre_Riesgo * Steps / 2)) / (
mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
Pm = 1 - (Pu + Pd)
    # Obtain the final branches of the trinomial tree of underlying prices
Arbol_Subyacente[0, 0] = Spot
for i in range(1, Ramificaciones_Arbol + 1):
Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
for j in range(1, (2 * i)):
Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1]
Arbol_Subyacente[i, j + 1] = Arbol_Subyacente[i - 1, j - 1] * Down
for j in range((2 * Ramificaciones_Arbol) + 1):
Arbol_Derivado[Ramificaciones_Arbol, j] = max(Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike),
0)
for m in range(Ramificaciones_Arbol + 1):
i = Ramificaciones_Arbol - m - 1
for j in range((2 * i) + 1):
Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike), (
Pu * Arbol_Derivado[i + 1, j] + Pm * Arbol_Derivado[i + 1, j + 1] + Pd * Arbol_Derivado[
i + 1, j + 2]) * mt.exp(-TLibre_Riesgo * Steps))
# return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,""),pd.DataFrame(Arbol_Derivado).replace(0,"")])
return Arbol_Derivado[0, 0]
def LSM(Spot,Strike,Vencimiento,Volatilidad,TLibre_Riesgo,Call_Put,NumSim=10,CambiosXDia=1):
    Deltat = 1/(Vencimiento*CambiosXDia) # Assume N price changes of the underlying per day
    Caminos_Subyacente = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    v = Volatilidad/mt.sqrt(365/Vencimiento) # De-annualize v, since the quoted volatility is annualized
    r = TLibre_Riesgo/(365/Vencimiento) # De-annualize r, since the quoted rate is annualized
for m in range(0,NumSim):
Caminos_Subyacente[m,0] = Spot
for t in range(1,(Vencimiento*CambiosXDia)+1):
Caminos_Subyacente[m,t] = Caminos_Subyacente[m,t-1]*mt.exp((r - (v**2)/2)*Deltat + np.random.normal(0,1)*mt.sqrt((v**2)*Deltat))
Caminos_Derivado = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
Caminos_Derivado[:,(Vencimiento*CambiosXDia)] = np.maximum((Caminos_Subyacente[:,(Vencimiento*CambiosXDia)] - Strike)*Call_Put,0)
for t in range((Vencimiento*CambiosXDia)-1,-1,-1):
        Caminos_Derivado[:,t] = Caminos_Derivado[:,t+1]*mt.exp(-r*Deltat) # Observed continuation value (HV)
Caminos_EnEl_Dinero = ((Caminos_Subyacente[:,t]-Strike)*Call_Put>0)
if Caminos_EnEl_Dinero.sum()>0:
Tabla_Regresion = np.zeros((Caminos_EnEl_Dinero.sum(),4))
Tabla_Regresion[:,0] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero] #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)
Tabla_Regresion[:,1] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero])
Tabla_Regresion[:,2] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**3 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-2*Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]+(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2)/2)
Modelo = LinearRegression().fit(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
#print(Modelo.score(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero]))
            Tabla_Regresion[:,3] = Modelo.intercept_ + Modelo.coef_[0]*Tabla_Regresion[:,0] + Modelo.coef_[1]*Tabla_Regresion[:,1] + Modelo.coef_[2]*Tabla_Regresion[:,2] # Expected continuation value
            # If the expected continuation value E[HV] is below the exercise value EV, exercise now (take EV); otherwise keep holding (HV)
Caminos_Derivado[np.where(Caminos_EnEl_Dinero==True),t] = np.where(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
#Caminos_Derivado[np.where((Caminos_EnEl_Dinero==True)&(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put)),t+1] = 0
#return pd.DataFrame(Caminos_Subyacente)
return Caminos_Derivado[:,0].mean() | 59.243902 | 269 | 0.651571 | import pandas as pd
import numpy as np
import math as mt
from sklearn.linear_model import LinearRegression
def Binomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
Ramificaciones_Arbol=100, Modelo="Cox Equity"):
if Modelo == "Cox Equity":
ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
if Modelo == "Cox Futuros":
ConfigModelo = 0
if Modelo == "Cox Divisas":
ConfigModelo = TLibre_Riesgo - Tasa_Foranea
Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
Vencimiento = Vencimiento / 365.0
Steps = Vencimiento / Ramificaciones_Arbol
Up = mt.exp(Volatilidad * mt.sqrt(Steps))
Down = mt.exp(-Volatilidad * mt.sqrt(Steps))
P = (mt.exp(ConfigModelo * Steps) - Down) / (Up - Down)
Arbol_Subyacente[0, 0] = Spot
for i in range(1, Ramificaciones_Arbol + 1):
Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
for j in range(1, i + 1):
Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1] * Down
for j in range(Ramificaciones_Arbol + 1):
Arbol_Derivado[Ramificaciones_Arbol, j] = max(0,
Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike))
for m in range(Ramificaciones_Arbol + 1):
i = Ramificaciones_Arbol - m - 1
for j in range(i + 1):
Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike),
(P * Arbol_Derivado[i + 1, j] + (1 - P) * Arbol_Derivado[i + 1, j + 1]) * mt.exp(
-TLibre_Riesgo * Steps))
return Arbol_Derivado[0, 0]
def Trinomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
Ramificaciones_Arbol=100, Modelo="Cox Equity"):
if Modelo == "Cox Equity":
ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
if Modelo == "Cox Futuros":
ConfigModelo = 0
if Modelo == "Cox Divisas":
ConfigModelo = TLibre_Riesgo - Tasa_Foranea
Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
Vencimiento = Vencimiento / 365.0
Steps = Vencimiento / Ramificaciones_Arbol
Up = mt.exp(Volatilidad * mt.sqrt(2 * Steps))
Down = mt.exp(-Volatilidad * mt.sqrt(2 * Steps))
Pu = ((mt.exp(TLibre_Riesgo * Steps / 2) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2))) / (
mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
Pd = ((mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(TLibre_Riesgo * Steps / 2)) / (
mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
Pm = 1 - (Pu + Pd)
Arbol_Subyacente[0, 0] = Spot
for i in range(1, Ramificaciones_Arbol + 1):
Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
for j in range(1, (2 * i)):
Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1]
Arbol_Subyacente[i, j + 1] = Arbol_Subyacente[i - 1, j - 1] * Down
for j in range((2 * Ramificaciones_Arbol) + 1):
Arbol_Derivado[Ramificaciones_Arbol, j] = max(Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike),
0)
for m in range(Ramificaciones_Arbol + 1):
i = Ramificaciones_Arbol - m - 1
for j in range((2 * i) + 1):
Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike), (
Pu * Arbol_Derivado[i + 1, j] + Pm * Arbol_Derivado[i + 1, j + 1] + Pd * Arbol_Derivado[
i + 1, j + 2]) * mt.exp(-TLibre_Riesgo * Steps))
return Arbol_Derivado[0, 0]
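# Illustrative sketch (not part of the original file): minimal usage of the
# two lattice pricers above. The helper name and all market inputs below are
# made-up placeholders; Call_Put=1 prices a call, -1 a put.
def _example_lattice_pricing():
    bino = Binomial_Tree(Spot=100, Strike=100, Vencimiento=365, Volatilidad=0.2,
                         TLibre_Riesgo=0.05, Call_Put=1, Ramificaciones_Arbol=200)
    trino = Trinomial_Tree(Spot=100, Strike=100, Vencimiento=365, Volatilidad=0.2,
                           TLibre_Riesgo=0.05, Call_Put=1, Ramificaciones_Arbol=200)
    # With enough steps both trees should converge to similar American prices.
    return bino, trino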
def LSM(Spot,Strike,Vencimiento,Volatilidad,TLibre_Riesgo,Call_Put,NumSim=10,CambiosXDia=1):
    Deltat = 1/(Vencimiento*CambiosXDia)
    Caminos_Subyacente = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    v = Volatilidad/mt.sqrt(365/Vencimiento)
    r = TLibre_Riesgo/(365/Vencimiento)
for m in range(0,NumSim):
Caminos_Subyacente[m,0] = Spot
for t in range(1,(Vencimiento*CambiosXDia)+1):
Caminos_Subyacente[m,t] = Caminos_Subyacente[m,t-1]*mt.exp((r - (v**2)/2)*Deltat + np.random.normal(0,1)*mt.sqrt((v**2)*Deltat))
Caminos_Derivado = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
Caminos_Derivado[:,(Vencimiento*CambiosXDia)] = np.maximum((Caminos_Subyacente[:,(Vencimiento*CambiosXDia)] - Strike)*Call_Put,0)
for t in range((Vencimiento*CambiosXDia)-1,-1,-1):
        Caminos_Derivado[:,t] = Caminos_Derivado[:,t+1]*mt.exp(-r*Deltat)
        Caminos_EnEl_Dinero = ((Caminos_Subyacente[:,t]-Strike)*Call_Put>0)
if Caminos_EnEl_Dinero.sum()>0:
Tabla_Regresion = np.zeros((Caminos_EnEl_Dinero.sum(),4))
            Tabla_Regresion[:,0] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]
            Tabla_Regresion[:,1] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2
            Tabla_Regresion[:,2] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**3
            Modelo = LinearRegression().fit(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
            Tabla_Regresion[:,3] = Modelo.intercept_ + Modelo.coef_[0]*Tabla_Regresion[:,0] + Modelo.coef_[1]*Tabla_Regresion[:,1] + Modelo.coef_[2]*Tabla_Regresion[:,2]
            Caminos_Derivado[np.where(Caminos_EnEl_Dinero==True),t] = np.where(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
return Caminos_Derivado[:,0].mean() | true | true |
1c4988afa1867c543a8f26fed4ae75527832aa35 | 2,372 | py | Python | scalyr_agent/json_lib/__init__.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | null | null | null | scalyr_agent/json_lib/__init__.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | null | null | null | scalyr_agent/json_lib/__init__.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
r"""A lightweight JSON library used by the Scalyr agent to serialize data
for storage to disk and for sending over HTTP.
This library is used instead of python's default json library because
it supports some custom Scalyr extensions (chiefly it allows for comments
in the JSON) and the json library is not included in all versions of Python
supported by the Scalyr agent.
The classes exported by this package are:
JsonObject -- A JSON object containing keys and fields. Has similar methods as a dict.
JsonArray -- A JSON array. Has similar methods to a list.
JsonConversionException -- Exception raised when conversion of a field in a JSON object fails.
JsonMissingFieldException -- Exception raised when a requested field in a JSON object is missing.
JsonParseException -- Exception raised when parsing a string as JSON fails.
The methods exported are:
parse -- Parses a string as JSON and returns the value.
serialize -- Serializes a JSON value to a string.
"""
__author__ = 'Steven Czerwinski <[email protected]>'
from scalyr_agent.json_lib.exceptions import JsonConversionException
from scalyr_agent.json_lib.exceptions import JsonMissingFieldException, JsonParseException
from scalyr_agent.json_lib.objects import JsonObject, JsonArray
from scalyr_agent.json_lib.parser import parse
from scalyr_agent.json_lib.serializer import serialize
from scalyr_agent.json_lib.serializer import serialize_as_length_prefixed_string
__all__ = ['parse', 'serialize', 'JsonObject', 'JsonArray', 'JsonConversionException', 'JsonMissingFieldException',
'JsonParseException', 'serialize_as_length_prefixed_string']
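# Illustrative usage sketch (not part of the original module); `parse` and
# `serialize` are the two entry points documented above. Per the docstring,
# the returned JsonObject behaves much like a dict. The helper name is
# hypothetical.
def _example_round_trip():
    obj = parse('{ "foo": "bar" }')
    return serialize(obj)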
| 50.468085 | 115 | 0.73946 |
__author__ = 'Steven Czerwinski <[email protected]>'
from scalyr_agent.json_lib.exceptions import JsonConversionException
from scalyr_agent.json_lib.exceptions import JsonMissingFieldException, JsonParseException
from scalyr_agent.json_lib.objects import JsonObject, JsonArray
from scalyr_agent.json_lib.parser import parse
from scalyr_agent.json_lib.serializer import serialize
from scalyr_agent.json_lib.serializer import serialize_as_length_prefixed_string
__all__ = ['parse', 'serialize', 'JsonObject', 'JsonArray', 'JsonConversionException', 'JsonMissingFieldException',
'JsonParseException', 'serialize_as_length_prefixed_string']
| true | true |
1c4989f318ebf96499779f9a58c688a9a5cb6cda | 28,860 | py | Python | test/functional/test_framework/script.py | Groestlcoin/groestlcoin | e081d1e38dea360fe48f0c8eb59a384900e6c6af | [
"MIT"
] | 49 | 2017-06-27T17:36:20.000Z | 2021-11-26T15:32:37.000Z | test/functional/test_framework/script.py | Groestlcoin/groestlcoin | e081d1e38dea360fe48f0c8eb59a384900e6c6af | [
"MIT"
] | 19 | 2016-11-06T21:44:47.000Z | 2021-01-14T21:33:06.000Z | test/functional/test_framework/script.py | Groestlcoin/groestlcoin | e081d1e38dea360fe48f0c8eb59a384900e6c6af | [
"MIT"
] | 31 | 2016-11-07T02:04:00.000Z | 2022-03-21T11:30:29.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.
This file is modified from python-bitcoinlib.
"""
from collections import namedtuple
import hashlib
import struct
import unittest
from typing import List, Dict
from .key import TaggedHash, tweak_add_pubkey
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50
LEAF_VERSION_TAPSCRIPT = 0xc0
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
"""Convert number to bitcoin-specific little endian format."""
# We need v.bit_length() bits, plus a sign bit for every nonzero number.
n_bits = v.bit_length() + (v != 0)
# The number of bytes for that is:
n_bytes = (n_bits + 7) // 8
# Convert number to absolute value + sign in top bit.
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
# Serialize to bytes
return encoded_v.to_bytes(n_bytes, 'little')
class CScriptOp(int):
"""A single script opcode"""
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super().__new__(cls, n))
return _opcode_instances[n]
OPCODE_NAMES: Dict[CScriptOp, str] = {}
_opcode_instances: List[CScriptOp] = []
# Populate opcode instance table
for n in range(0xff + 1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# BIP 342 opcodes (Tapscript)
OP_CHECKSIGADD = CScriptOp(0xba)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super().__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
# We assume valid push_size and minimal encoding
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# add makes no sense for a CScript()
raise NotImplementedError
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super().__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super().__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
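# Illustrative sketch (not part of the original file): CScript coerces
# opcodes, small ints and byte pushes at construction time, and "cooked"
# iteration yields decoded opcodes and raw push data. The helper name is
# hypothetical.
def _example_cscript():
    p2pkh = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG])
    ops = list(p2pkh)
    assert ops[2] == b'\x00' * 20  # push data comes back as bytes
    assert p2pkh.GetSigOpCount(fAccurate=True) == 1  # one OP_CHECKSIG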
SIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
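# Illustrative sketch (not part of the original file): FindAndDelete drops
# opcodes whose serialization at that position matches `sig` exactly, which
# is how signatures are scrubbed before the legacy sighash is computed. The
# helper name and the toy script are hypothetical.
def _example_find_and_delete():
    script = CScript([OP_DUP, b'\x01\x02', OP_EQUAL])
    sig = CScript([b'\x01\x02'])  # serialized push of the two bytes
    assert FindAndDelete(script, sig) == CScript([OP_DUP, OP_EQUAL])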
def LegacySignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = sha256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(sha256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(sha256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return sha256(ss)
class TestFrameworkScript(unittest.TestCase):
def test_bn2vch(self):
self.assertEqual(bn2vch(0), bytes([]))
self.assertEqual(bn2vch(1), bytes([0x01]))
self.assertEqual(bn2vch(-1), bytes([0x81]))
self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))
def test_cscriptnum_encoding(self):
# round-trip negative and multi-byte CScriptNums
values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
for value in values:
self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
assert (len(txTo.vin) == len(spent_utxos))
assert (input_index < len(txTo.vin))
out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
in_type = hash_type & SIGHASH_ANYONECANPAY
spk = spent_utxos[input_index].scriptPubKey
ss = bytes([0, hash_type]) # epoch, hash_type
ss += struct.pack("<i", txTo.nVersion)
ss += struct.pack("<I", txTo.nLockTime)
if in_type != SIGHASH_ANYONECANPAY:
ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
ss += sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))
ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))
ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))
if out_type == SIGHASH_ALL:
ss += sha256(b"".join(o.serialize() for o in txTo.vout))
spend_type = 0
if annex is not None:
spend_type |= 1
if (scriptpath):
spend_type |= 2
ss += bytes([spend_type])
if in_type == SIGHASH_ANYONECANPAY:
ss += txTo.vin[input_index].prevout.serialize()
ss += struct.pack("<q", spent_utxos[input_index].nValue)
ss += ser_string(spk)
ss += struct.pack("<I", txTo.vin[input_index].nSequence)
else:
ss += struct.pack("<I", input_index)
if (spend_type & 1):
ss += sha256(ser_string(annex))
if out_type == SIGHASH_SINGLE:
if input_index < len(txTo.vout):
ss += sha256(txTo.vout[input_index].serialize())
else:
ss += bytes(0 for _ in range(32))
if (scriptpath):
ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
ss += bytes([0])
ss += struct.pack("<i", codeseparator_pos)
assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
return TaggedHash("TapSighash", ss)
def taproot_tree_helper(scripts):
if len(scripts) == 0:
return ([], bytes())
if len(scripts) == 1:
# One entry: treat as a leaf
script = scripts[0]
assert(not callable(script))
if isinstance(script, list):
return taproot_tree_helper(script)
assert(isinstance(script, tuple))
version = LEAF_VERSION_TAPSCRIPT
name = script[0]
code = script[1]
if len(script) == 3:
version = script[2]
assert version & 1 == 0
assert isinstance(code, bytes)
h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
if name is None:
return ([], h)
return ([(name, version, code, bytes())], h)
elif len(scripts) == 2 and callable(scripts[1]):
# Two entries, and the right one is a function
left, left_h = taproot_tree_helper(scripts[0:1])
right_h = scripts[1](left_h)
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = []
else:
# Two or more entries: descend into each side
split_pos = len(scripts) // 2
left, left_h = taproot_tree_helper(scripts[0:split_pos])
right, right_h = taproot_tree_helper(scripts[split_pos:])
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = [(name, version, script, control + left_h) for name, version, script, control in right]
if right_h < left_h:
right_h, left_h = left_h, right_h
h = TaggedHash("TapBranch", left_h + right_h)
return (left + right, h)
# A TaprootInfo object has the following fields:
# - scriptPubKey: the scriptPubKey (witness v1 CScript)
# - internal_pubkey: the internal pubkey (32 bytes)
# - negflag: whether the pubkey in the scriptPubKey was negated from internal_pubkey+tweak*G (bool).
# - tweak: the tweak (32 bytes)
# - leaves: a dict of name -> TaprootLeafInfo objects for all known leaves
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,internal_pubkey,negflag,tweak,leaves")
# A TaprootLeafInfo object has the following fields:
# - script: the leaf script (CScript or bytes)
# - version: the leaf version (0xc0 for BIP342 tapscript)
# - merklebranch: the merkle branch to use for this leaf (32*N bytes)
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch")
def taproot_construct(pubkey, scripts=None):
"""Construct a tree of Taproot spending conditions
pubkey: a 32-byte xonly pubkey for the internal pubkey (bytes)
scripts: a list of items; each item is either:
- a (name, CScript or bytes, leaf version) tuple
- a (name, CScript or bytes) tuple (defaulting to leaf version 0xc0)
- another list of items (with the same structure)
- a list of two items; the first of which is an item itself, and the
second is a function. The function takes as input the Merkle root of the
first item, and produces a (fictitious) partner to hash with.
Returns: a TaprootInfo object
"""
if scripts is None:
scripts = []
ret, h = taproot_tree_helper(scripts)
tweak = TaggedHash("TapTweak", pubkey + h)
tweaked, negated = tweak_add_pubkey(pubkey, tweak)
leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)
return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)
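# Illustrative sketch (not part of the original file). The x-only pubkey
# below is just the secp256k1 generator's x coordinate, used as a placeholder
# internal key; the helper name and leaf name are hypothetical.
def _example_taproot():
    internal = bytes.fromhex('79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798')
    info = taproot_construct(internal, [('csa', CScript([OP_CHECKSIGADD]))])
    # Witness v1 output: OP_1 followed by the 32-byte tweaked key push.
    assert len(info.scriptPubKey) == 34 and info.scriptPubKey[0] == OP_1
    assert info.leaves['csa'].version == LEAF_VERSION_TAPSCRIPT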
def is_op_success(o):
return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)
| 33.325635 | 201 | 0.618919 |
from collections import namedtuple
import hashlib
import struct
import unittest
from typing import List, Dict
from .key import TaggedHash, tweak_add_pubkey
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50
LEAF_VERSION_TAPSCRIPT = 0xc0
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
n_bits = v.bit_length() + (v != 0)
n_bytes = (n_bits + 7) // 8
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
return encoded_v.to_bytes(n_bytes, 'little')
class CScriptOp(int):
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
if len(d) < 0x4c:
            return b'' + bytes([len(d)]) + d
        elif len(d) <= 0xff:
            return b'\x4c' + bytes([len(d)]) + d
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d
        else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super().__new__(cls, n))
return _opcode_instances[n]
OPCODE_NAMES: Dict[CScriptOp, str] = {}
_opcode_instances: List[CScriptOp] = []
for n in range(0xff + 1):
CScriptOp(n)
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_CHECKSIGADD = CScriptOp(0xba)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
def __init__(self, msg, data):
self.data = data
super().__init__(msg)
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
class CScript(bytes):
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
raise NotImplementedError
def join(self, iterable):
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super().__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
return super().__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = sha256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(sha256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(sha256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return sha256(ss)
class TestFrameworkScript(unittest.TestCase):
def test_bn2vch(self):
self.assertEqual(bn2vch(0), bytes([]))
self.assertEqual(bn2vch(1), bytes([0x01]))
self.assertEqual(bn2vch(-1), bytes([0x81]))
self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))
def test_cscriptnum_encoding(self):
# round-trip negative and multi-byte CScriptNums
values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
for value in values:
self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
assert (len(txTo.vin) == len(spent_utxos))
assert (input_index < len(txTo.vin))
out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
in_type = hash_type & SIGHASH_ANYONECANPAY
spk = spent_utxos[input_index].scriptPubKey
ss = bytes([0, hash_type]) # epoch, hash_type
ss += struct.pack("<i", txTo.nVersion)
ss += struct.pack("<I", txTo.nLockTime)
if in_type != SIGHASH_ANYONECANPAY:
ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
ss += sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))
ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))
ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))
if out_type == SIGHASH_ALL:
ss += sha256(b"".join(o.serialize() for o in txTo.vout))
spend_type = 0
if annex is not None:
spend_type |= 1
if (scriptpath):
spend_type |= 2
ss += bytes([spend_type])
if in_type == SIGHASH_ANYONECANPAY:
ss += txTo.vin[input_index].prevout.serialize()
ss += struct.pack("<q", spent_utxos[input_index].nValue)
ss += ser_string(spk)
ss += struct.pack("<I", txTo.vin[input_index].nSequence)
else:
ss += struct.pack("<I", input_index)
if (spend_type & 1):
ss += sha256(ser_string(annex))
if out_type == SIGHASH_SINGLE:
if input_index < len(txTo.vout):
ss += sha256(txTo.vout[input_index].serialize())
else:
ss += bytes(0 for _ in range(32))
if (scriptpath):
ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
ss += bytes([0])
ss += struct.pack("<i", codeseparator_pos)
assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
return TaggedHash("TapSighash", ss)
def taproot_tree_helper(scripts):
if len(scripts) == 0:
return ([], bytes())
if len(scripts) == 1:
# One entry: treat as a leaf
script = scripts[0]
assert(not callable(script))
if isinstance(script, list):
return taproot_tree_helper(script)
assert(isinstance(script, tuple))
version = LEAF_VERSION_TAPSCRIPT
name = script[0]
code = script[1]
if len(script) == 3:
version = script[2]
assert version & 1 == 0
assert isinstance(code, bytes)
h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
if name is None:
return ([], h)
return ([(name, version, code, bytes())], h)
elif len(scripts) == 2 and callable(scripts[1]):
# Two entries, and the right one is a function
left, left_h = taproot_tree_helper(scripts[0:1])
right_h = scripts[1](left_h)
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = []
else:
# Two or more entries: descend into each side
split_pos = len(scripts) // 2
left, left_h = taproot_tree_helper(scripts[0:split_pos])
right, right_h = taproot_tree_helper(scripts[split_pos:])
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = [(name, version, script, control + left_h) for name, version, script, control in right]
if right_h < left_h:
right_h, left_h = left_h, right_h
h = TaggedHash("TapBranch", left_h + right_h)
return (left + right, h)
# A TaprootInfo object has the following fields:
# - scriptPubKey: the scriptPubKey (witness v1 CScript)
# - internal_pubkey: the internal pubkey (32 bytes)
# - negflag: whether the pubkey in the scriptPubKey was negated from internal_pubkey+tweak*G (bool).
# - tweak: the tweak (32 bytes)
# - leaves: a dict of name -> TaprootLeafInfo objects for all known leaves
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,internal_pubkey,negflag,tweak,leaves")
# A TaprootLeafInfo object has the following fields:
# - script: the leaf script (CScript or bytes)
# - version: the leaf version (0xc0 for BIP342 tapscript)
# - merklebranch: the merkle branch to use for this leaf (32*N bytes)
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch")
def taproot_construct(pubkey, scripts=None):
if scripts is None:
scripts = []
ret, h = taproot_tree_helper(scripts)
tweak = TaggedHash("TapTweak", pubkey + h)
tweaked, negated = tweak_add_pubkey(pubkey, tweak)
leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)
return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)
def is_op_success(o):
return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)
| true | true |
1c4989fdbdd50273e32b2fa29a924ec8d6080b4c | 1,729 | py | Python | joss_paper/figures/gen_phold_space_time_plot.py | KarrLab/desim | 6f189d8c8e850e092d816f6be3d6f87b4f983ac2 | [
"MIT"
] | 16 | 2019-12-12T15:49:17.000Z | 2022-03-31T20:34:36.000Z | joss_paper/figures/gen_phold_space_time_plot.py | KarrLab/desim | 6f189d8c8e850e092d816f6be3d6f87b4f983ac2 | [
"MIT"
] | 65 | 2019-08-15T14:50:38.000Z | 2020-12-17T14:36:04.000Z | joss_paper/figures/gen_phold_space_time_plot.py | KarrLab/desim | 6f189d8c8e850e092d816f6be3d6f87b4f983ac2 | [
"MIT"
] | 5 | 2020-07-16T22:15:47.000Z | 2021-08-16T02:16:17.000Z | """ Generate a space-time plot of PHOLD
:Author: Arthur Goldberg <[email protected]>
:Date: 2020-06-22
:Copyright: 2020, Karr Lab
:License: MIT
"""
from argparse import Namespace
import os
import tempfile
from de_sim.examples.phold import RunPhold
from de_sim.testing.utilities_for_testing import unset_env_var
from de_sim.visualize import SpaceTime
from wc_utils.util.environ import EnvironUtils
import de_sim
def run_phold(max_time, num_phold_procs=3, frac_self_events=0.5):
""" Run PHOLD, and generate a plot log
Args:
        max_time (:obj:`float`): simulation duration
num_phold_procs (:obj:`int`, optional): number of PHOLD processes to run
frac_self_events (:obj:`float`, optional): fraction of events sent to self
"""
args = Namespace(max_time=max_time, num_phold_procs=num_phold_procs,
frac_self_events=frac_self_events)
RunPhold.main(args)
def create_phold_space_time_diagram():
""" Run PHOLD, and use plot log to generate a space-time diagram """
plot_log = os.path.expanduser('~/.wc/log/de_sim.plot.log')
try:
os.remove(plot_log)
except FileNotFoundError:
pass
run_phold(8)
space_time = SpaceTime()
space_time.get_data(plot_log)
temp_dir = tempfile.TemporaryDirectory()
space_time_plot = os.path.join(temp_dir.name, "phold_space_time_plot.pdf")
with unset_env_var('DISPLAY'):
space_time.plot_data(space_time_plot)
print('space-time diagram written to', space_time_plot)
with EnvironUtils.temp_config_env(((['de_sim', 'log_events'], 'True'),
(['debug_logs', 'handlers', 'plot.file', 'level'], 'debug'))):
create_phold_space_time_diagram()
| 32.622642 | 97 | 0.70561 |
from argparse import Namespace
import os
import tempfile
from de_sim.examples.phold import RunPhold
from de_sim.testing.utilities_for_testing import unset_env_var
from de_sim.visualize import SpaceTime
from wc_utils.util.environ import EnvironUtils
import de_sim
def run_phold(max_time, num_phold_procs=3, frac_self_events=0.5):
args = Namespace(max_time=max_time, num_phold_procs=num_phold_procs,
frac_self_events=frac_self_events)
RunPhold.main(args)
def create_phold_space_time_diagram():
plot_log = os.path.expanduser('~/.wc/log/de_sim.plot.log')
try:
os.remove(plot_log)
except FileNotFoundError:
pass
run_phold(8)
space_time = SpaceTime()
space_time.get_data(plot_log)
temp_dir = tempfile.TemporaryDirectory()
space_time_plot = os.path.join(temp_dir.name, "phold_space_time_plot.pdf")
with unset_env_var('DISPLAY'):
space_time.plot_data(space_time_plot)
print('space-time diagram written to', space_time_plot)
with EnvironUtils.temp_config_env(((['de_sim', 'log_events'], 'True'),
(['debug_logs', 'handlers', 'plot.file', 'level'], 'debug'))):
create_phold_space_time_diagram()
| true | true |
1c4989ff4081d21eafb011936a27765d85b3e3f2 | 2,127 | py | Python | webapp/libs/plugins/saplugin.py | crocodilered/TheObjectRating | 2f44eb9cf7f39d3ab95cbc4ea720995a29344349 | [
"MIT"
] | null | null | null | webapp/libs/plugins/saplugin.py | crocodilered/TheObjectRating | 2f44eb9cf7f39d3ab95cbc4ea720995a29344349 | [
"MIT"
] | null | null | null | webapp/libs/plugins/saplugin.py | crocodilered/TheObjectRating | 2f44eb9cf7f39d3ab95cbc4ea720995a29344349 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import cherrypy
from cherrypy.process import wspbus, plugins
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
__all__ = ['SAEnginePlugin']
class SAEnginePlugin(plugins.SimplePlugin):
def __init__(self, bus, connection_string=None):
"""
The plugin is registered to the CherryPy engine and therefore
        is part of the bus (the engine *is* a bus) registry.
        We use this plugin to create the SA engine. When the plugin
        starts, it subscribes the 'bind-session' and 'commit-session'
        channels so request handlers can obtain and commit sessions.
"""
plugins.SimplePlugin.__init__(self, bus)
self.sa_engine = None
self.connection_string = connection_string
self.session = scoped_session(sessionmaker(autoflush=True, autocommit=False))
def start(self):
self.bus.log('Starting up DB access')
self.sa_engine = create_engine(self.connection_string, echo=False)
self.bus.subscribe("bind-session", self.bind)
self.bus.subscribe("commit-session", self.commit)
def stop(self):
self.bus.log('Stopping down DB access')
self.bus.unsubscribe("bind-session", self.bind)
self.bus.unsubscribe("commit-session", self.commit)
if self.sa_engine:
self.sa_engine.dispose()
self.sa_engine = None
def bind(self):
"""
Whenever this plugin receives the 'bind-session' command, it applies
        this method to bind the current session to the engine.
It then returns the session to the caller.
"""
self.session.configure(bind=self.sa_engine)
return self.session
def commit(self):
"""
Commits the current transaction or rollbacks if an error occurs.
In all cases, the current session is unbound and therefore
not usable any longer.
"""
try:
self.session.commit()
except:
self.session.rollback()
raise
finally:
self.session.remove()
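# Illustrative wiring sketch (not part of the original module); the SQLite
# URL and helper name are placeholders. Handlers obtain a session through the
# 'bind-session' channel and finish work with 'commit-session'.
def _example_wiring():
    SAEnginePlugin(cherrypy.engine, 'sqlite:///app.db').subscribe()
    cherrypy.engine.start()
    session = cherrypy.engine.publish('bind-session').pop()
    # ... use the SQLAlchemy session, then:
    cherrypy.engine.publish('commit-session')
    cherrypy.engine.stop()
    return session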
| 33.761905 | 85 | 0.64504 | import cherrypy
from cherrypy.process import wspbus, plugins
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
__all__ = ['SAEnginePlugin']
class SAEnginePlugin(plugins.SimplePlugin):
def __init__(self, bus, connection_string=None):
plugins.SimplePlugin.__init__(self, bus)
self.sa_engine = None
self.connection_string = connection_string
self.session = scoped_session(sessionmaker(autoflush=True, autocommit=False))
def start(self):
self.bus.log('Starting up DB access')
self.sa_engine = create_engine(self.connection_string, echo=False)
self.bus.subscribe("bind-session", self.bind)
self.bus.subscribe("commit-session", self.commit)
def stop(self):
        self.bus.log('Shutting down DB access')
self.bus.unsubscribe("bind-session", self.bind)
self.bus.unsubscribe("commit-session", self.commit)
if self.sa_engine:
self.sa_engine.dispose()
self.sa_engine = None
def bind(self):
self.session.configure(bind=self.sa_engine)
return self.session
def commit(self):
try:
self.session.commit()
except:
self.session.rollback()
raise
finally:
self.session.remove()
| true | true |
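A minimal usage sketch for the plugin above (assuming SAEnginePlugin is importable as defined; the SQLite URL and handler are made up for illustration). The bus channels 'bind-session' and 'commit-session' are the ones subscribed in start():

import cherrypy
from sqlalchemy import text

class Root(object):
    @cherrypy.expose
    def index(self):
        # publish() returns the list of subscriber results; pop() yields the session
        session = cherrypy.engine.publish('bind-session').pop()
        value = session.execute(text('SELECT 1')).scalar()
        cherrypy.engine.publish('commit-session')
        return 'db says %s' % value

if __name__ == '__main__':
    SAEnginePlugin(cherrypy.engine, 'sqlite:///app.db').subscribe()
    cherrypy.quickstart(Root())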
1c498a3ccb22414034d40442b3f457d23a2b3520 | 3,508 | py | Python | portfolio/Python/scrapy/americanrv/streetsideauto.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/americanrv/streetsideauto.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/americanrv/streetsideauto.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class StreetSideAutoSpider(BaseSpider):
name = 'streetsideauto.com'
allowed_domains = ['www.streetsideauto.com']
start_urls = ('http://www.streetsideauto.com/',)
def __init__(self, *args, **kwargs):
super(StreetSideAutoSpider, self).__init__(*args, **kwargs)
csv_file = csv.reader(open(os.path.join(HERE, 'americanrv_products.csv')))
csv_file.next()
self.product_ids = {}
for row in csv_file:
ids = set()
ids.add(row[0])
self.product_ids[row[0]] = {'mfrgid': row[2], 'ids': frozenset(ids)}
def start_requests(self):
for sku, data in self.product_ids.items():
for id in data['ids']:
url = 'http://www.streetsideauto.com/search.asp?keywords=' + re.sub(' ','+', id)
req = Request(url)
req.meta['sku'] = sku
req.meta['mfrgid'] = data['mfrgid']
yield req
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# pagination
# next_page = hxs.select(u'//dl[@class="pages"]/dd/a[contains(text(),"Next")]/@href').extract()
# if next_page:
# next_page = urljoin_rfc(get_base_url(response), next_page[0])
# req = Request(next_page, meta={'sku': response.meta['sku']})
# yield req
# products
products = hxs.select(u'//div[@class="p-summary leaf"]/a[@class="part-title"]/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
req = Request(url, callback=self.parse_product)
req.meta['sku'] = response.meta['sku']
req.meta['mfrgid'] = response.meta['mfrgid']
yield req
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('price', u'//div[@id="conv-box"]//dd[@class="amount"]/text()')
if not product_loader.get_output_value('price'):
product_loader.add_xpath('price', u'//dl[@class="ssa-price-dl"]/dd[@class="ssa-price"]/text()')
product_loader.add_value('url', response.url)
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('identifier', response.meta['sku'].lower())
name = hxs.select(u'//div[@class="right-column-left"]/div[@class="title"]/h2/text()').extract()[0].strip()
product_loader.add_value('name', name)
# sku = response.meta['sku'].lower().split(' ')
# name = product_loader.get_output_value('name').lower()
# sku = filter(lambda x: x != '' and x in name, sku)
part_number = hxs.select(u'//div[@class="title"]/h2/span/text()').re('Part No. (.*)')[0]
mfrgid = response.meta['mfrgid']
if part_number == mfrgid and product_loader.get_output_value('price'):
yield product_loader.load_item()
| 38.977778 | 114 | 0.61488 | import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class StreetSideAutoSpider(BaseSpider):
name = 'streetsideauto.com'
allowed_domains = ['www.streetsideauto.com']
start_urls = ('http://www.streetsideauto.com/',)
def __init__(self, *args, **kwargs):
super(StreetSideAutoSpider, self).__init__(*args, **kwargs)
csv_file = csv.reader(open(os.path.join(HERE, 'americanrv_products.csv')))
csv_file.next()
self.product_ids = {}
for row in csv_file:
ids = set()
ids.add(row[0])
self.product_ids[row[0]] = {'mfrgid': row[2], 'ids': frozenset(ids)}
def start_requests(self):
for sku, data in self.product_ids.items():
for id in data['ids']:
url = 'http://www.streetsideauto.com/search.asp?keywords=' + re.sub(' ','+', id)
req = Request(url)
req.meta['sku'] = sku
req.meta['mfrgid'] = data['mfrgid']
yield req
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//div[@class="p-summary leaf"]/a[@class="part-title"]/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
req = Request(url, callback=self.parse_product)
req.meta['sku'] = response.meta['sku']
req.meta['mfrgid'] = response.meta['mfrgid']
yield req
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('price', u'//div[@id="conv-box"]//dd[@class="amount"]/text()')
if not product_loader.get_output_value('price'):
product_loader.add_xpath('price', u'//dl[@class="ssa-price-dl"]/dd[@class="ssa-price"]/text()')
product_loader.add_value('url', response.url)
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('identifier', response.meta['sku'].lower())
name = hxs.select(u'//div[@class="right-column-left"]/div[@class="title"]/h2/text()').extract()[0].strip()
product_loader.add_value('name', name)
part_number = hxs.select(u'//div[@class="title"]/h2/span/text()').re('Part No. (.*)')[0]
mfrgid = response.meta['mfrgid']
if part_number == mfrgid and product_loader.get_output_value('price'):
yield product_loader.load_item()
| true | true |
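One caveat on the search URL built in start_requests(): re.sub(' ', '+', id) only escapes spaces. A hedged sketch of the same URL built with urlencode (Python 2 import, to match the spider), which also handles other reserved characters:

from urllib import urlencode  # Python 2 location; urllib.parse.urlencode on Python 3

base = 'http://www.streetsideauto.com/search.asp?'
print(base + urlencode({'keywords': 'ABC 123'}))
# -> http://www.streetsideauto.com/search.asp?keywords=ABC+123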
1c498d0e059a2c2fff70bac574fa1aba4e9dd83e | 59 | py | Python | SPLIT.py | anayakoti/FirstSample | 8ef05772991644e63a4fd6759458f449cd2b00c0 | [
"bzip2-1.0.6"
] | null | null | null | SPLIT.py | anayakoti/FirstSample | 8ef05772991644e63a4fd6759458f449cd2b00c0 | [
"bzip2-1.0.6"
] | null | null | null | SPLIT.py | anayakoti/FirstSample | 8ef05772991644e63a4fd6759458f449cd2b00c0 | [
"bzip2-1.0.6"
] | null | null | null | WORD="tHIS IS ANUDEEP";
lister=WORD.split();
print(lister);
| 14.75 | 23 | 0.711864 | WORD="tHIS IS ANUDEEP";
lister=WORD.split();
print(lister);
| true | true |
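For reference, the corrected and idiomatic form of the snippet above (str.split, no semicolons):

word = "tHIS IS ANUDEEP"
print(word.split())  # ['tHIS', 'IS', 'ANUDEEP']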
1c498da49fdf2f6ac2d0d58c9f1b429a18e01773 | 9,475 | py | Python | sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetHanaInstanceResult',
'AwaitableGetHanaInstanceResult',
'get_hana_instance',
]
@pulumi.output_type
class GetHanaInstanceResult:
"""
HANA instance info on Azure (ARM properties and HANA properties)
"""
def __init__(__self__, hana_instance_id=None, hardware_profile=None, hw_revision=None, id=None, location=None, name=None, network_profile=None, os_profile=None, partner_node_id=None, power_state=None, provisioning_state=None, proximity_placement_group=None, storage_profile=None, tags=None, type=None):
if hana_instance_id and not isinstance(hana_instance_id, str):
raise TypeError("Expected argument 'hana_instance_id' to be a str")
pulumi.set(__self__, "hana_instance_id", hana_instance_id)
if hardware_profile and not isinstance(hardware_profile, dict):
raise TypeError("Expected argument 'hardware_profile' to be a dict")
pulumi.set(__self__, "hardware_profile", hardware_profile)
if hw_revision and not isinstance(hw_revision, str):
raise TypeError("Expected argument 'hw_revision' to be a str")
pulumi.set(__self__, "hw_revision", hw_revision)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_profile and not isinstance(network_profile, dict):
raise TypeError("Expected argument 'network_profile' to be a dict")
pulumi.set(__self__, "network_profile", network_profile)
if os_profile and not isinstance(os_profile, dict):
raise TypeError("Expected argument 'os_profile' to be a dict")
pulumi.set(__self__, "os_profile", os_profile)
if partner_node_id and not isinstance(partner_node_id, str):
raise TypeError("Expected argument 'partner_node_id' to be a str")
pulumi.set(__self__, "partner_node_id", partner_node_id)
if power_state and not isinstance(power_state, str):
raise TypeError("Expected argument 'power_state' to be a str")
pulumi.set(__self__, "power_state", power_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, str):
raise TypeError("Expected argument 'proximity_placement_group' to be a str")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if storage_profile and not isinstance(storage_profile, dict):
raise TypeError("Expected argument 'storage_profile' to be a dict")
pulumi.set(__self__, "storage_profile", storage_profile)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="hanaInstanceId")
def hana_instance_id(self) -> str:
"""
Specifies the HANA instance unique ID.
"""
return pulumi.get(self, "hana_instance_id")
@property
@pulumi.getter(name="hardwareProfile")
def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']:
"""
Specifies the hardware settings for the HANA instance.
"""
return pulumi.get(self, "hardware_profile")
@property
@pulumi.getter(name="hwRevision")
def hw_revision(self) -> str:
"""
Hardware revision of a HANA instance
"""
return pulumi.get(self, "hw_revision")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional['outputs.NetworkProfileResponse']:
"""
Specifies the network settings for the HANA instance.
"""
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> Optional['outputs.OSProfileResponse']:
"""
Specifies the operating system settings for the HANA instance.
"""
return pulumi.get(self, "os_profile")
@property
@pulumi.getter(name="partnerNodeId")
def partner_node_id(self) -> Optional[str]:
"""
ARM ID of another HanaInstance that will share a network with this HanaInstance
"""
return pulumi.get(self, "partner_node_id")
@property
@pulumi.getter(name="powerState")
def power_state(self) -> str:
"""
Resource power state
"""
return pulumi.get(self, "power_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
State of provisioning of the HanaInstance
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> str:
"""
Resource proximity placement group
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
"""
Specifies the storage settings for the HANA instance disks.
"""
return pulumi.get(self, "storage_profile")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetHanaInstanceResult(GetHanaInstanceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHanaInstanceResult(
hana_instance_id=self.hana_instance_id,
hardware_profile=self.hardware_profile,
hw_revision=self.hw_revision,
id=self.id,
location=self.location,
name=self.name,
network_profile=self.network_profile,
os_profile=self.os_profile,
partner_node_id=self.partner_node_id,
power_state=self.power_state,
provisioning_state=self.provisioning_state,
proximity_placement_group=self.proximity_placement_group,
storage_profile=self.storage_profile,
tags=self.tags,
type=self.type)
def get_hana_instance(hana_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHanaInstanceResult:
"""
HANA instance info on Azure (ARM properties and HANA properties)
API Version: 2017-11-03-preview.
:param str hana_instance_name: Name of the SAP HANA on Azure instance.
:param str resource_group_name: Name of the resource group.
"""
__args__ = dict()
__args__['hanaInstanceName'] = hana_instance_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:hanaonazure:getHanaInstance', __args__, opts=opts, typ=GetHanaInstanceResult).value
return AwaitableGetHanaInstanceResult(
hana_instance_id=__ret__.hana_instance_id,
hardware_profile=__ret__.hardware_profile,
hw_revision=__ret__.hw_revision,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
network_profile=__ret__.network_profile,
os_profile=__ret__.os_profile,
partner_node_id=__ret__.partner_node_id,
power_state=__ret__.power_state,
provisioning_state=__ret__.provisioning_state,
proximity_placement_group=__ret__.proximity_placement_group,
storage_profile=__ret__.storage_profile,
tags=__ret__.tags,
type=__ret__.type)
| 37.9 | 306 | 0.661214 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetHanaInstanceResult',
'AwaitableGetHanaInstanceResult',
'get_hana_instance',
]
@pulumi.output_type
class GetHanaInstanceResult:
def __init__(__self__, hana_instance_id=None, hardware_profile=None, hw_revision=None, id=None, location=None, name=None, network_profile=None, os_profile=None, partner_node_id=None, power_state=None, provisioning_state=None, proximity_placement_group=None, storage_profile=None, tags=None, type=None):
if hana_instance_id and not isinstance(hana_instance_id, str):
raise TypeError("Expected argument 'hana_instance_id' to be a str")
pulumi.set(__self__, "hana_instance_id", hana_instance_id)
if hardware_profile and not isinstance(hardware_profile, dict):
raise TypeError("Expected argument 'hardware_profile' to be a dict")
pulumi.set(__self__, "hardware_profile", hardware_profile)
if hw_revision and not isinstance(hw_revision, str):
raise TypeError("Expected argument 'hw_revision' to be a str")
pulumi.set(__self__, "hw_revision", hw_revision)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_profile and not isinstance(network_profile, dict):
raise TypeError("Expected argument 'network_profile' to be a dict")
pulumi.set(__self__, "network_profile", network_profile)
if os_profile and not isinstance(os_profile, dict):
raise TypeError("Expected argument 'os_profile' to be a dict")
pulumi.set(__self__, "os_profile", os_profile)
if partner_node_id and not isinstance(partner_node_id, str):
raise TypeError("Expected argument 'partner_node_id' to be a str")
pulumi.set(__self__, "partner_node_id", partner_node_id)
if power_state and not isinstance(power_state, str):
raise TypeError("Expected argument 'power_state' to be a str")
pulumi.set(__self__, "power_state", power_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, str):
raise TypeError("Expected argument 'proximity_placement_group' to be a str")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if storage_profile and not isinstance(storage_profile, dict):
raise TypeError("Expected argument 'storage_profile' to be a dict")
pulumi.set(__self__, "storage_profile", storage_profile)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="hanaInstanceId")
def hana_instance_id(self) -> str:
return pulumi.get(self, "hana_instance_id")
@property
@pulumi.getter(name="hardwareProfile")
def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']:
return pulumi.get(self, "hardware_profile")
@property
@pulumi.getter(name="hwRevision")
def hw_revision(self) -> str:
return pulumi.get(self, "hw_revision")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional['outputs.NetworkProfileResponse']:
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> Optional['outputs.OSProfileResponse']:
return pulumi.get(self, "os_profile")
@property
@pulumi.getter(name="partnerNodeId")
def partner_node_id(self) -> Optional[str]:
return pulumi.get(self, "partner_node_id")
@property
@pulumi.getter(name="powerState")
def power_state(self) -> str:
return pulumi.get(self, "power_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> str:
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
return pulumi.get(self, "storage_profile")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetHanaInstanceResult(GetHanaInstanceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHanaInstanceResult(
hana_instance_id=self.hana_instance_id,
hardware_profile=self.hardware_profile,
hw_revision=self.hw_revision,
id=self.id,
location=self.location,
name=self.name,
network_profile=self.network_profile,
os_profile=self.os_profile,
partner_node_id=self.partner_node_id,
power_state=self.power_state,
provisioning_state=self.provisioning_state,
proximity_placement_group=self.proximity_placement_group,
storage_profile=self.storage_profile,
tags=self.tags,
type=self.type)
def get_hana_instance(hana_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHanaInstanceResult:
__args__ = dict()
__args__['hanaInstanceName'] = hana_instance_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:hanaonazure:getHanaInstance', __args__, opts=opts, typ=GetHanaInstanceResult).value
return AwaitableGetHanaInstanceResult(
hana_instance_id=__ret__.hana_instance_id,
hardware_profile=__ret__.hardware_profile,
hw_revision=__ret__.hw_revision,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
network_profile=__ret__.network_profile,
os_profile=__ret__.os_profile,
partner_node_id=__ret__.partner_node_id,
power_state=__ret__.power_state,
provisioning_state=__ret__.provisioning_state,
proximity_placement_group=__ret__.proximity_placement_group,
storage_profile=__ret__.storage_profile,
tags=__ret__.tags,
type=__ret__.type)
| true | true |
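A minimal sketch of calling the invoke above from a Pulumi program (instance and resource-group names are placeholders):

import pulumi
import pulumi_azure_native as azure_native

hana = azure_native.hanaonazure.get_hana_instance(
    hana_instance_name="myHanaInstance",    # hypothetical name
    resource_group_name="myResourceGroup",  # hypothetical group
)
pulumi.export("hanaPowerState", hana.power_state)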
1c498df9a78bd33534951fe5b48a871a66008a16 | 740 | py | Python | static/generaMapas/generaCalendarioPolinico.py | othesoluciones/TFM | 8ed46985604c83c517612b38326b39a61b4cf102 | [
"MIT"
] | null | null | null | static/generaMapas/generaCalendarioPolinico.py | othesoluciones/TFM | 8ed46985604c83c517612b38326b39a61b4cf102 | [
"MIT"
] | null | null | null | static/generaMapas/generaCalendarioPolinico.py | othesoluciones/TFM | 8ed46985604c83c517612b38326b39a61b4cf102 | [
"MIT"
] | null | null | null | # Connect to the database
import base64
import json
from pymongo import MongoClient as Connection
cadenaCon= 'mongodb://othesoluciones:'+base64.b64decode("b3RoZXNvbHVjaW9uZXM=")+'@ds029635.mlab.com:29635/othesoluciones1'
MONGODB_URI =cadenaCon
conexion = Connection(MONGODB_URI)
db = conexion.othesoluciones1
import pandas as pd
# Pollen calendar (http://encuentralainspiracion.es/la-alergia-respiratoria/tipos-de-alergenos/alergia-al-polen/calendario-de-polinizacion/)
columnas =['Mes','Nivel']
datos = [(1,0),(2,0),(3,1),(4,2),(5,2),(6,2),(7,1),(8,0),(9,0),(10,0),(11,0),(12,0)]
df=pd.DataFrame(datos,columns=columnas)
recordsdf = json.loads(df.T.to_json()).values()
db.calendarioPolen.insert_many(recordsdf)
conexion.close() | 35.238095 | 143 | 0.758108 | import base64
import json
from pymongo import MongoClient as Connection
cadenaCon= 'mongodb://othesoluciones:'+base64.b64decode("b3RoZXNvbHVjaW9uZXM=")+'@ds029635.mlab.com:29635/othesoluciones1'
MONGODB_URI =cadenaCon
conexion = Connection(MONGODB_URI)
db = conexion.othesoluciones1
import pandas as pd
columnas =['Mes','Nivel']
datos = [(1,0),(2,0),(3,1),(4,2),(5,2),(6,2),(7,1),(8,0),(9,0),(10,0),(11,0),(12,0)]
df=pd.DataFrame(datos,columns=columnas)
recordsdf = json.loads(df.T.to_json()).values()
db.calendarioPolen.insert_many(recordsdf)
conexion.close() | true | true |
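The json.loads(df.T.to_json()).values() round-trip above turns the DataFrame into one dict per row for MongoDB; pandas can do this directly with to_dict(orient='records'). A short sketch:

import pandas as pd

df = pd.DataFrame([(1, 0), (3, 1), (4, 2)], columns=['Mes', 'Nivel'])
records = df.to_dict(orient='records')
print(records)  # [{'Mes': 1, 'Nivel': 0}, {'Mes': 3, 'Nivel': 1}, {'Mes': 4, 'Nivel': 2}]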
1c498e29631e47183b4ba18522e66da6b0b16e5d | 1,139 | py | Python | build.py | db4/conan-opencv | c624fdc34452ba0206f9862c16c8e5b52f1738f9 | [
"MIT"
] | null | null | null | build.py | db4/conan-opencv | c624fdc34452ba0206f9862c16c8e5b52f1738f9 | [
"MIT"
] | null | null | null | build.py | db4/conan-opencv | c624fdc34452ba0206f9862c16c8e5b52f1738f9 | [
"MIT"
] | 1 | 2018-09-14T10:18:35.000Z | 2018-09-14T10:18:35.000Z | from conan.packager import ConanMultiPackager
import os
available_versions = ["3.1.0", "3.4.0"]
def main():
"""
Main function.
"""
builder = ConanMultiPackager(build_policy="outdated")
if "CONAN_REFERENCE" in os.environ:
builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False)
else:
for version in available_versions:
builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False, reference="OpenCV/%s@%s/%s" %
(version, os.environ["CONAN_USERNAME"], os.environ["CONAN_CHANNEL"]))
filtered_builds = []
for settings, options, env_vars, build_requires in builder.builds:
with_ipp_tbb_list = [True] if options['OpenCV:shared'] else [True, False]
for with_ipp_tbb in with_ipp_tbb_list:
opts = dict(options)
opts['OpenCV:with_ipp'] = with_ipp_tbb
opts['OpenCV:with_tbb'] = with_ipp_tbb
filtered_builds.append([settings, opts, env_vars, build_requires])
builder.builds = filtered_builds
builder.run()
if __name__ == "__main__":
main()
| 35.59375 | 117 | 0.656716 | from conan.packager import ConanMultiPackager
import os
available_versions = ["3.1.0", "3.4.0"]
def main():
builder = ConanMultiPackager(build_policy="outdated")
if "CONAN_REFERENCE" in os.environ:
builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False)
else:
for version in available_versions:
builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False, reference="OpenCV/%s@%s/%s" %
(version, os.environ["CONAN_USERNAME"], os.environ["CONAN_CHANNEL"]))
filtered_builds = []
for settings, options, env_vars, build_requires in builder.builds:
with_ipp_tbb_list = [True] if options['OpenCV:shared'] else [True, False]
for with_ipp_tbb in with_ipp_tbb_list:
opts = dict(options)
opts['OpenCV:with_ipp'] = with_ipp_tbb
opts['OpenCV:with_tbb'] = with_ipp_tbb
filtered_builds.append([settings, opts, env_vars, build_requires])
builder.builds = filtered_builds
builder.run()
if __name__ == "__main__":
main()
| true | true |
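Standalone illustration of what the filtering loop in main() does to the build matrix (the settings/options tuples here are made up; with ConanMultiPackager the base list comes from add_common_builds):

base = [({'arch': 'x86_64'}, {'OpenCV:shared': shared}, {}, {}) for shared in (True, False)]
expanded = []
for settings, options, env_vars, build_requires in base:
    # shared builds keep IPP/TBB enabled; static builds get both on/off variants
    for with_ipp_tbb in ([True] if options['OpenCV:shared'] else [True, False]):
        opts = dict(options)
        opts['OpenCV:with_ipp'] = with_ipp_tbb
        opts['OpenCV:with_tbb'] = with_ipp_tbb
        expanded.append((settings, opts, env_vars, build_requires))
print(len(expanded))  # 3: shared+ipp/tbb, static+ipp/tbb, static without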
1c498e2f7add5e8cba6aa3ffe578938129f969b3 | 507 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | romnn/cookiecutter-pypackage | 469228de74a6cd0a8065270ff7c930d016e2f045 | [
"BSD-3-Clause"
] | 1 | 2021-01-30T04:10:24.000Z | 2021-01-30T04:10:24.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | romnnn/cookiecutter-pypackage | 469228de74a6cd0a8065270ff7c930d016e2f045 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | romnnn/cookiecutter-pypackage | 469228de74a6cd0a8065270ff7c930d016e2f045 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for {{cookiecutter.project_slug}}."""
import sys
import typing
import click
@click.command()
def main(args: typing.Optional[str] = None) -> int:
"""Console script for {{cookiecutter.project_slug}}."""
click.echo("Replace this message by putting your code into {{cookiecutter.project_slug}}.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 25.35 | 103 | 0.682446 |
import sys
import typing
import click
@click.command()
def main(args: typing.Optional[str] = None) -> int:
click.echo("Replace this message by putting your code into {{cookiecutter.project_slug}}.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) | true | true |
1c49903e1f2ffea5052f9e556c6d9cd76d45ad77 | 80,689 | py | Python | selfdrive/car/toyota/values.py | mohammedx49/ArnePilot01 | 81af1abadc9a9d572919bafeeb698f2a989d363b | [
"MIT"
] | null | null | null | selfdrive/car/toyota/values.py | mohammedx49/ArnePilot01 | 81af1abadc9a9d572919bafeeb698f2a989d363b | [
"MIT"
] | null | null | null | selfdrive/car/toyota/values.py | mohammedx49/ArnePilot01 | 81af1abadc9a9d572919bafeeb698f2a989d363b | [
"MIT"
] | null | null | null | # flake8: noqa
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
# Steer torque limits
class SteerLimitParams:
STEER_MAX = 1500
STEER_DELTA_UP = 10 # 1.5s time to peak torque
STEER_DELTA_DOWN = 44 # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
STEER_ERROR_MAX = 350 # max delta between torque cmd and torque motor
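# Sanity check for the ramp comments above (illustrative; assumes the 100 Hz
# rate at which openpilot sends the steering command):
#   STEER_MAX / STEER_DELTA_UP   = 1500 / 10 = 150 frames ~= 1.5 s to reach peak torque
#   STEER_MAX / STEER_DELTA_DOWN = 1500 / 44 ~= 34 frames ~= 0.34 s to unwind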
class CAR:
PRIUS = "TOYOTA PRIUS 2017"
PRIUS_2019 = "TOYOTA PRIUS 2019"
RAV4H = "TOYOTA RAV4 HYBRID 2017"
RAV4 = "TOYOTA RAV4 2017"
COROLLA = "TOYOTA COROLLA 2017"
COROLLA_2015 = "TOYOTA COROLLA 2015"
LEXUS_RX = "LEXUS RX 350 2017"
LEXUS_RXH = "LEXUS RX HYBRID 2017"
LEXUS_RX_TSS2 = "LEXUS RX350 2020"
LEXUS_RXH_TSS2 = "LEXUS RX450 HYBRID 2020"
CHR = "TOYOTA C-HR 2018"
CHRH = "TOYOTA C-HR HYBRID 2018"
CAMRY = "TOYOTA CAMRY 2018"
CAMRYH = "TOYOTA CAMRY HYBRID 2018"
HIGHLANDER = "TOYOTA HIGHLANDER 2017"
HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
AVALON = "TOYOTA AVALON 2016"
RAV4_TSS2 = "TOYOTA RAV4 2019"
COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
LEXUS_ES_TSS2 = "LEXUS ES 2019"
LEXUS_ESH_TSS2 = "LEXUS ES 300H 2019"
SIENNA = "TOYOTA SIENNA XLE 2018"
LEXUS_IS = "LEXUS IS300 2018"
LEXUS_CTH = "LEXUS CT 200H 2018"
RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
LEXUS_ISH = "LEXUS IS HYBRID 2017"
LEXUS_NXH = "LEXUS NX300H 2018"
LEXUS_UXH_TSS2 = "LEXUS UX 250H 2019"
class ECU:
CAM = Ecu.fwdCamera # camera
DSU = Ecu.dsu # driving support unit
APGS = Ecu.apgs # advanced parking guidance system
SMART = Ecu.unknown
# addr: (ecu, cars, bus, send period in 100 Hz frames (1/freq*100), static value to send)
STATIC_MSGS = [
(0x130, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 100, b'\x00\x00\x00\x00\x00\x00\x38'),
(0x240, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x241, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x244, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x245, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x248, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x00\x00\x00\x00\x00\x01'),
(0x367, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 40, b'\x06\x00'),
(0x414, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x17\x00'),
(0x466, Ecu.fwdCamera, (CAR.COROLLA, CAR.COROLLA_2015), 1, 100, b'\x24\x20\xB1'),
(0x489, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48a, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48b, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x66\x06\x08\x0a\x02\x00\x00\x00'),
(0x4d3, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015), 0, 100, b'\x1C\x00\x00\x01\x00\x00\x00\x00'),
(0x128, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1, 3, b'\xf4\x01\x90\x83\x00\x37'),
(0x128, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH), 1, 3, b'\x03\x00\x20\x00\x00\x52'),
(0x141, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 2, b'\x00\x00\x00\x46'),
(0x160, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
(0x161, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1, 7, b'\x00\x1e\x00\x00\x00\x80\x07'),
  (0x161, Ecu.dsu, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH), 1, 7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
(0x283, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 3, b'\x00\x00\x00\x00\x00\x00\x8c'),
(0x2E6, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x2E6, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x344, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
(0x365, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x00\x80\x03\x00\x08'),
(0x365, Ecu.dsu, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x00\x00\x80\xfc\x00\x08'),
(0x366, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x4d\x82\x40\x02\x00'),
(0x366, Ecu.dsu, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x72\x07\xff\x09\xfe\x00'),
(0x470, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
(0x470, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH), 1, 100, b'\x00\x00\x01\x79'),
(0x4CB, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
ECU_FINGERPRINT = {
Ecu.fwdCamera: [0x2e4], # steer torque cmd
Ecu.dsu: [0x283], # accel cmd
}
FINGERPRINTS = {
CAR.RAV4: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8}
],
CAR.RAV4H: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 218: 8, 296: 8, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 515: 3, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1207: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1792: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1984: 8, 1990: 8, 1992: 8, 1998: 8, 2016: 8, 2018: 8, 2019: 8, 2022: 8, 2024: 8, 2026: 8}
],
CAR.PRIUS: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 825: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 861: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 875: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1130: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1681: 8, 1767:4, 1777: 8, 1779: 8, 1792: 8, 1840: 8, 1863:8, 1872: 8, 1904: 8, 1912: 8, 1941: 8, 1949: 8, 1952: 8, 1960: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996:8, 1998: 8, 2004: 8, 2010: 8, 2012: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8}
],
CAR.PRIUS_2019: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2002: 8, 2010: 8}
],
CAR.COROLLA: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8}
],
CAR.COROLLA_2015: [
{32: 4, 36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 456: 8, 464: 8, 466: 8, 467: 8, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 611: 7, 705: 8, 800: 8, 849: 4, 852: 1, 865: 8, 896: 8, 897: 8, 898: 8, 899: 8, 900: 6, 902: 6, 903: 8, 905: 8, 906: 5, 910: 8, 911: 8, 916: 2, 921: 8, 928: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 976: 1, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1024: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1078: 8, 1079: 8, 1088: 8, 1090: 8, 1091: 8, 1196: 8, 1217: 8, 1219: 8, 1222: 8, 1224: 8, 1244: 8, 1245: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1560: 8, 1561: 8, 1562: 8, 1564: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1574: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1761: 8, 1762: 8}
],
CAR.LEXUS_RX: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1800: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RXH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 744: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 6, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1777: 8, 1779: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1956: 8, 1960: 8, 1964: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RX_TSS2: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8,1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594:8, 1595: 8, 1600: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CHR: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 705: 8, 740: 5, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 913: 8, 918: 8, 921: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8}
],
CAR.CHRH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2030: 8}
],
CAR.CAMRY: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 513: 6, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1767: 4, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1956: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CAMRYH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDER: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.HIGHLANDER_TSS2: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1952: 8, 1960: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH: [
{36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1263: 8, 1264: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8}
],
CAR.AVALON: [
{36: 8, 37: 8, 170: 8, 180: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 905: 8, 911: 1, 916: 2, 921: 8, 933: 6, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 1005: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4_TSS2: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1872: 8, 1880:8 , 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.COROLLA_TSS2: [
{36: 8, 37: 8, 114: 5, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1809: 8, 1816: 8, 1817: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1960: 8, 1981: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8}
],
CAR.COROLLAH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 7, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_ES_TSS2: [
    {36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ESH_TSS2: [
    {36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 744: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.SIENNA: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_IS: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 400: 6, 426: 6, 452: 8, 464: 8, 466: 8, 467: 5, 544: 4, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 738: 2, 740: 5, 744: 8, 800: 8, 815: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 914: 2, 916: 3, 917: 5, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1009: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1193: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1208: 8, 1212: 8, 1220: 8, 1226: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1590: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1648: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ISH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 400: 6, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 7, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1009: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_CTH: [
{36: 8, 37: 8, 170: 8, 180: 8, 288: 8, 426: 6, 452: 8, 466: 8, 467: 8, 548: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 810: 2, 832: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 950: 8, 951: 8, 953: 3, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1116: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4H_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1952: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_NXH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 800: 8, 810: 2, 812: 3, 818: 8, 822: 8, 824: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 889: 8, 891: 8, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 987: 8, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1006: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1195: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_UXH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1813: 8, 1814: 8, 1816: 8, 1818: 8, 1821: 8, 1822: 8, 1840: 8, 1848: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1940: 8, 1941: 8, 1945: 8, 1948: 8, 1949: 8, 1952: 8, 1953: 8, 1956: 8, 1960: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
}
# Don't use these fingerprints for fingerprinting; they are still needed for ECU detection
IGNORED_FINGERPRINTS = [CAR.LEXUS_RXH_TSS2]
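# A minimal sketch of how FINGERPRINTS is meant to be consulted (illustrative
# only, not openpilot's real fingerprinting code): each fingerprint maps CAN
# message address -> payload length (DLC), and a candidate car stays in the
# running while every observed message agrees with one of its recorded dicts.
# `observed` is a hypothetical {address: length} dict built from live CAN traffic.
def match_fingerprint_candidates(observed):
  candidates = []
  for car, fingerprints in FINGERPRINTS.items():
    if car in IGNORED_FINGERPRINTS:
      continue  # kept for ECU detection only, never matched directly
    for fp in fingerprints:
      if all(fp.get(addr) == length for addr, length in observed.items()):
        candidates.append(car)
        break
  return candidates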
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [b'F152607060\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510705200\x00\x00\x00\x00',
b'881510701300\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B41051\x00\x00\x00\x00\x00\x00'],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0603400 ',
b'8646F0605000 ',
b'8646F0606000 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF404000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x01896630E88000\x00\x00\x00\x00',
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602191\x00\x00\x00\x00\x00\x00',
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00'
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E64100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F0E02100\x00\x00\x00\x00!!!!!!!!!!!!!!!!',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F152653330\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B53271\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [b'8821F4702300\x00\x00\x00\x00'],
(Ecu.fwdCamera, 0x750, 0x6d): [b'8646F5301400\x00\x00\x00\x00'],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.PRIUS_2019: {
(Ecu.engine, 0x700, None): [
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47060\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647290\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
b'8965B42063\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642520\x00\x00\x00\x00\x00\x00',
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'\x01F152606281\x00\x00\x00\x00\x00\x00'],
(Ecu.eps, 0x7a1, None): [b'8965B33252\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78100\x00\x00\x00\x00\x00\x00',
b'8965B78060\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
}
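# A hedged sketch of how FW_VERSIONS could drive identification (not the real
# query code; `live_fw` is a hypothetical {(ecu, addr, subaddr): fw_version}
# dict collected over UDS). A car remains a candidate when every ECU it lists
# was either not seen or reported one of the known firmware versions.
def match_fw_candidates(live_fw):
  candidates = []
  for car, ecus in FW_VERSIONS.items():
    if all(key not in live_fw or live_fw[key] in versions
           for key, versions in ecus.items()):
      candidates.append(car)
  return candidates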
STEER_THRESHOLD = 100
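# STEER_THRESHOLD is compared against the driver torque signal to detect a
# steering override; a sketch of the intended check (message/signal names are
# assumed from the Toyota DBCs, not verified here):
#   steering_pressed = abs(cp.vl["STEER_TORQUE_SENSOR"]['STEER_TORQUE_DRIVER']) > STEER_THRESHOLD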
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_2019: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA_2015: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_UXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
}
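# Sketch of the usual DBC lookup, assuming dbc_dict (from selfdrive.car)
# returns a mapping with 'pt' and 'radar' keys:
#   pt_dbc = DBC[candidate]['pt']        # powertrain DBC, e.g. 'toyota_nodsu_pt_generated'
#   radar_dbc = DBC[candidate]['radar']  # ADAS DBC, e.g. 'toyota_tss2_adas'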
NO_DSU_CAR = [CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
TSS2_CAR = [CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
NO_STOP_TIMER_CAR = [CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.SIENNA, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2] # no resume button press required
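# Hedged sketch of how these capability sets gate platform behavior (the exact
# CarParams fields are assumptions from openpilot-style interface code, not
# verified against this file's consumers):
#   ret.enableDsu = candidate not in NO_DSU_CAR          # DSU absent or forwarded
#   is_tss2 = candidate in TSS2_CAR                      # selects TSS2 tuning/safety
#   standstill_resume = candidate in NO_STOP_TIMER_CAR   # resumes from a stop without a resume press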
| 68.964957 | 1,405 | 0.603267 |
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
class SteerLimitParams:
STEER_MAX = 1500
STEER_DELTA_UP = 10 STEER_DELTA_DOWN = 44 STEER_ERROR_MAX = 350
class CAR:
PRIUS = "TOYOTA PRIUS 2017"
PRIUS_2019 = "TOYOTA PRIUS 2019"
RAV4H = "TOYOTA RAV4 HYBRID 2017"
RAV4 = "TOYOTA RAV4 2017"
COROLLA = "TOYOTA COROLLA 2017"
COROLLA_2015 = "TOYOTA COROLLA 2015"
LEXUS_RX = "LEXUS RX 350 2017"
LEXUS_RXH = "LEXUS RX HYBRID 2017"
LEXUS_RX_TSS2 = "LEXUS RX350 2020"
LEXUS_RXH_TSS2 = "LEXUS RX450 HYBRID 2020"
CHR = "TOYOTA C-HR 2018"
CHRH = "TOYOTA C-HR HYBRID 2018"
CAMRY = "TOYOTA CAMRY 2018"
CAMRYH = "TOYOTA CAMRY HYBRID 2018"
HIGHLANDER = "TOYOTA HIGHLANDER 2017"
HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
AVALON = "TOYOTA AVALON 2016"
RAV4_TSS2 = "TOYOTA RAV4 2019"
COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
LEXUS_ES_TSS2 = "LEXUS ES 2019"
LEXUS_ESH_TSS2 = "LEXUS ES 300H 2019"
SIENNA = "TOYOTA SIENNA XLE 2018"
LEXUS_IS = "LEXUS IS300 2018"
LEXUS_CTH = "LEXUS CT 200H 2018"
RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
LEXUS_ISH = "LEXUS IS HYBRID 2017"
LEXUS_NXH = "LEXUS NX300H 2018"
LEXUS_UXH_TSS2 = "LEXUS UX 250H 2019"
class ECU:
CAM = Ecu.fwdCamera DSU = Ecu.dsu APGS = Ecu.apgs SMART = Ecu.unknown
STATIC_MSGS = [
(0x130, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 100, b'\x00\x00\x00\x00\x00\x00\x38'),
(0x240, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x241, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x244, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x245, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x248, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x00\x00\x00\x00\x00\x01'),
(0x367, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 40, b'\x06\x00'),
(0x414, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x17\x00'),
(0x466, Ecu.fwdCamera, (CAR.COROLLA, CAR.COROLLA_2015), 1, 100, b'\x24\x20\xB1'),
(0x489, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48a, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48b, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x66\x06\x08\x0a\x02\x00\x00\x00'),
(0x4d3, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015), 0, 100, b'\x1C\x00\x00\x01\x00\x00\x00\x00'),
(0x128, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1, 3, b'\xf4\x01\x90\x83\x00\x37'),
(0x128, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH), 1, 3, b'\x03\x00\x20\x00\x00\x52'),
(0x141, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 2, b'\x00\x00\x00\x46'),
(0x160, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
(0x161, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1, 7, b'\x00\x1e\x00\x00\x00\x80\x07'),
(0X161, Ecu.dsu, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH), 1, 7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
(0x283, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 3, b'\x00\x00\x00\x00\x00\x00\x8c'),
(0x2E6, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x2E6, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x344, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
(0x365, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x00\x80\x03\x00\x08'),
(0x365, Ecu.dsu, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x00\x00\x80\xfc\x00\x08'),
(0x366, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x4d\x82\x40\x02\x00'),
(0x366, Ecu.dsu, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x72\x07\xff\x09\xfe\x00'),
(0x470, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
(0x470, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH), 1, 100, b'\x00\x00\x01\x79'),
(0x4CB, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
ECU_FINGERPRINT = {
Ecu.fwdCamera: [0x2e4], Ecu.dsu: [0x283], }
FINGERPRINTS = {
CAR.RAV4: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8}
],
CAR.RAV4H: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 218: 8, 296: 8, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 515: 3, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1207: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1792: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1984: 8, 1990: 8, 1992: 8, 1998: 8, 2016: 8, 2018: 8, 2019: 8, 2022: 8, 2024: 8, 2026: 8}
],
CAR.PRIUS: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 825: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 861: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 875: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1130: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1681: 8, 1767:4, 1777: 8, 1779: 8, 1792: 8, 1840: 8, 1863:8, 1872: 8, 1904: 8, 1912: 8, 1941: 8, 1949: 8, 1952: 8, 1960: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996:8, 1998: 8, 2004: 8, 2010: 8, 2012: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8}
],
CAR.PRIUS_2019: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2002: 8, 2010: 8}
],
CAR.COROLLA: [
    {36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8}
],
CAR.COROLLA_2015: [
{32: 4, 36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 456: 8, 464: 8, 466: 8, 467: 8, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 611: 7, 705: 8, 800: 8, 849: 4, 852: 1, 865: 8, 896: 8, 897: 8, 898: 8, 899: 8, 900: 6, 902: 6, 903: 8, 905: 8, 906: 5, 910: 8, 911: 8, 916: 2, 921: 8, 928: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 976: 1, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1024: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1078: 8, 1079: 8, 1088: 8, 1090: 8, 1091: 8, 1196: 8, 1217: 8, 1219: 8, 1222: 8, 1224: 8, 1244: 8, 1245: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1560: 8, 1561: 8, 1562: 8, 1564: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1574: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1761: 8, 1762: 8}
],
CAR.LEXUS_RX: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1800: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RXH: [
    {36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 744: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 6, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1777: 8, 1779: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1956: 8, 1960: 8, 1964: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RX_TSS2: [
    {36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CHR: [
    {36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 705: 8, 740: 5, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 913: 8, 918: 8, 921: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8}
],
CAR.CHRH: [
    {36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2030: 8}
],
CAR.CAMRY: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 513: 6, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1767: 4, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1956: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CAMRYH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDER: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.HIGHLANDER_TSS2: [
    {36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1952: 8, 1960: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH: [
{36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1263: 8, 1264: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8}
],
CAR.AVALON: [
    {36: 8, 37: 8, 170: 8, 180: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 905: 8, 911: 1, 916: 2, 921: 8, 933: 6, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 1005: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4_TSS2: [
    {36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.COROLLA_TSS2: [
    {36: 8, 37: 8, 114: 5, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1809: 8, 1816: 8, 1817: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1960: 8, 1981: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8}
],
CAR.COROLLAH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 7, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_ES_TSS2: [
    {36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ESH_TSS2: [
    {36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 744: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.SIENNA: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_IS: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 400: 6, 426: 6, 452: 8, 464: 8, 466: 8, 467: 5, 544: 4, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 738: 2, 740: 5, 744: 8, 800: 8, 815: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 914: 2, 916: 3, 917: 5, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1009: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1193: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1208: 8, 1212: 8, 1220: 8, 1226: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1590: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1648: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ISH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 400: 6, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 7, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1009: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_CTH: [
{36: 8, 37: 8, 170: 8, 180: 8, 288: 8, 426: 6, 452: 8, 466: 8, 467: 8, 548: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 810: 2, 832: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 950: 8, 951: 8, 953: 3, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1116: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4H_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1952: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_NXH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 800: 8, 810: 2, 812: 3, 818: 8, 822: 8, 824: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 889: 8, 891: 8, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 987: 8, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1006: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1195: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_UXH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1813: 8, 1814: 8, 1816: 8, 1818: 8, 1821: 8, 1822: 8, 1840: 8, 1848: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1940: 8, 1941: 8, 1945: 8, 1948: 8, 1949: 8, 1952: 8, 1953: 8, 1956: 8, 1960: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
}
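
# Each fingerprint above maps a CAN arbitration ID (decimal) to the DLC
# (payload length in bytes) expected for that message. A minimal sketch of
# how a set of observed messages could be narrowed against these candidates;
# `candidate_cars` is a hypothetical helper for illustration, not openpilot's
# actual matching routine:
def candidate_cars(observed_msgs):
  # observed_msgs: dict of {address: payload_length} seen on the bus
  matches = set()
  for car, fps in FINGERPRINTS.items():
    for fp in fps:
      # a car stays a candidate only if every observed message appears in
      # the fingerprint with the same payload length
      if all(fp.get(addr) == length for addr, length in observed_msgs.items()):
        matches.add(car)
  return matches
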
IGNORED_FINGERPRINTS = [CAR.LEXUS_RXH_TSS2]
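
# FW_VERSIONS maps each car to a dict keyed by (Ecu, CAN address, sub-address);
# the value is the list of firmware version strings known for that ECU. A
# minimal lookup sketch (`fw_is_known` is a hypothetical helper, not part of
# the actual firmware query logic): check whether a version string read from
# an ECU has been seen before on a given car.
def fw_is_known(car, ecu, addr, subaddr, version):
  return version in FW_VERSIONS.get(car, {}).get((ecu, addr, subaddr), [])
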
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [b'F152607060\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510705200\x00\x00\x00\x00',
b'881510701300\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B41051\x00\x00\x00\x00\x00\x00'],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0603400 ',
b'8646F0605000 ',
b'8646F0606000 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF404000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x01896630E88000\x00\x00\x00\x00',
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602191\x00\x00\x00\x00\x00\x00',
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
      b'8965B48160\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E64100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F0E02100\x00\x00\x00\x00!!!!!!!!!!!!!!!!',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F152653330\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B53271\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [b'8821F4702300\x00\x00\x00\x00'],
(Ecu.fwdCamera, 0x750, 0x6d): [b'8646F5301400\x00\x00\x00\x00'],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.PRIUS_2019: {
(Ecu.engine, 0x700, None): [
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47060\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647290\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
b'8965B42063\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642520\x00\x00\x00\x00\x00\x00',
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'\x01F152606281\x00\x00\x00\x00\x00\x00'],
(Ecu.eps, 0x7a1, None): [b'8965B33252\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78100\x00\x00\x00\x00\x00\x00',
b'8965B78060\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
}
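# The table above maps each car to {(ecu, tx_addr, sub_addr): [known firmware
# version byte strings]}. A minimal lookup sketch (assumptions: the enclosing
# dict is named FW_VERSIONS as in upstream openpilot, and query_fw is a
# hypothetical helper, not part of this file):
#   def query_fw(car, ecu, addr, subaddr=None):
#       return FW_VERSIONS[car][(ecu, addr, subaddr)]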
STEER_THRESHOLD = 100
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_2019: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA_2015: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_UXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
}
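# Illustrative lookup in the DBC table (an assumption about downstream use,
# not part of this file): each entry pairs a powertrain DBC with an ADAS DBC,
# e.g.
#   DBC[CAR.PRIUS]  # -> dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas')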
NO_DSU_CAR = [CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
TSS2_CAR = [CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
NO_STOP_TIMER_CAR = [CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.SIENNA, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2] # no resume button press required
| true | true |
1c4990d4c73ad0f202de3c760de5c414e397c7d8 | 636 | py | Python | paths.py | ChiLlx/Modified-3D-UNet-Pytorch | 0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec | [
"MIT"
] | 193 | 2018-04-17T07:28:16.000Z | 2022-03-23T00:34:43.000Z | paths.py | ChiLlx/Modified-3D-UNet-Pytorch | 0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec | [
"MIT"
] | 7 | 2018-07-16T01:54:23.000Z | 2020-12-04T06:55:02.000Z | paths.py | ChiLlx/Modified-3D-UNet-Pytorch | 0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec | [
"MIT"
] | 50 | 2018-08-13T23:06:19.000Z | 2021-12-09T09:42:13.000Z | raw_training_data_folder = "/media/pkao/Dataset/BraTS2018/training"
raw_validation_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17ValidationData"
raw_testing_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17TestingData"
preprocessed_training_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2018_train"
preprocessed_validation_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2017_val"
preprocessed_testing_data_folder = "/media/pkao/Dataset/DeepLearningData/datasets/BraTS_2017_test"
#results_folder = "/home/pkao/PhD/results/BraTS_2017_lasagne/" # where to save the network training and validation files
| 63.6 | 121 | 0.849057 | raw_training_data_folder = "/media/pkao/Dataset/BraTS2018/training"
raw_validation_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17ValidationData"
raw_testing_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17TestingData"
preprocessed_training_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2018_train"
preprocessed_validation_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2017_val"
preprocessed_testing_data_folder = "/media/pkao/Dataset/DeepLearningData/datasets/BraTS_2017_test"
| true | true |
1c499129a87226688813507ae4e22c4b9909a7a1 | 520 | py | Python | example/main.py | helioh2/pygame-universe | 94be98072ff1644480aaaab9692c8040223c3fb1 | [
"MIT"
] | 1 | 2018-04-04T17:55:35.000Z | 2018-04-04T17:55:35.000Z | example/main.py | helioh2/pygame-universe | 94be98072ff1644480aaaab9692c8040223c3fb1 | [
"MIT"
] | null | null | null | example/main.py | helioh2/pygame-universe | 94be98072ff1644480aaaab9692c8040223c3fb1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from funcoes import *
''' ================= '''
''' Main (Big Bang):
'''
''' Jogo -> Jogo '''
''' start the world with main(JOGO_INICIAL) '''
def main(inic):
big_bang(inic, tela=TELA,
frequencia=60,
quando_tick=mover_jogo,
desenhar=desenha_jogo,
quando_tecla=trata_tecla,
quando_solta_tecla=trata_solta_tecla,
modo_debug=True,
fonte_debug=15
)
main(JOGO_INICIAL)
| 20.8 | 50 | 0.532692 |
from funcoes import *
def main(inic):
big_bang(inic, tela=TELA,
frequencia=60,
quando_tick=mover_jogo,
desenhar=desenha_jogo,
quando_tecla=trata_tecla,
quando_solta_tecla=trata_solta_tecla,
modo_debug=True,
fonte_debug=15
)
main(JOGO_INICIAL)
| true | true |
1c4991c37149654c613707697ef34148eef8f639 | 2,815 | py | Python | git-auto-commit.py | electryone/git-auto-commit | 66dbd02d0d4696f7f12162784aff0c97318a1a74 | [
"MIT"
] | null | null | null | git-auto-commit.py | electryone/git-auto-commit | 66dbd02d0d4696f7f12162784aff0c97318a1a74 | [
"MIT"
] | null | null | null | git-auto-commit.py | electryone/git-auto-commit | 66dbd02d0d4696f7f12162784aff0c97318a1a74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/29 22:49
# @Author : Huyd
# @Site :
# @File : git-auto-commit.py
# @Software: PyCharm
import datetime
import smtplib
import subprocess
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import re
import schedule
import time
def send_mail(subject, message):
mail_host = "smtp.163.com" # 设置邮件服务器
mail_user = "[email protected]" # 用户名
mail_pass = "21897594" # 口令
sender = '[email protected]' # 发送邮件的邮箱
receivers = '[email protected]' # 接收邮件的邮箱,可设置为你的QQ邮箱或者其他邮箱,多个邮箱用,分隔开来
# 创建一个带附件的实例
message = MIMEText(message, 'plain', 'utf-8')
message['From'] = "[email protected]" # 邮件发送人
message['To'] = "[email protected]" # 邮件接收人
# subject = '测试监测结果' # 邮件主题
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL()
        smtpObj.connect(mail_host, 465)  # 25 is the SMTP port number
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("邮件发送成功")
except smtplib.SMTPException:
print("Error: 无法发送邮件")
def job():
    with open('content.txt', 'a') as f:
        f.write(time.asctime(time.localtime(time.time())) + '\n')
date = datetime.datetime.today().isoformat()[0:10]
#status = subprocess.run(["git", "status"])
status = subprocess.run(["git", "status"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(status)
print('**********start git add.**********')
gadd = subprocess.run(["git", "add", "."],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gadd)
print('**********git add done.**********')
print('**********start git commit.**********')
gcom = subprocess.run(["git", "commit", "-m" + date],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gcom)
print('**********git commit done.**********')
print('**********start git push.**********')
gpush = subprocess.run(["git", "push", "origin", "master"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gpush)
print('**********git push done.**********')
#send_mail("git a commit", str(date)) # 发送邮件
#time.sleep(61)
def main(h, m):
    '''h is the scheduled hour, m is the scheduled minute'''
while True:
job()
break
    # check whether the scheduled time has been reached, e.g. 0:00
    while True:
        now = datetime.datetime.now()
        print(now.hour, ' ', now.minute, ' ', now.microsecond)
        # scheduled time reached, exit the inner loop
        if now.hour == h and now.minute == m:
            break
        # not yet time, wait 20 seconds and check again
        time.sleep(20)
    # do the real work, once a day
    #job()
print(time.asctime(time.localtime(time.time())))
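# main(23, 55): runs job() once immediately, then blocks until 23:55 before
# exiting; the daily job() call after the wait is left commented out above.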
main(23, 55)
| 32.356322 | 125 | 0.596803 | import datetime
import smtplib
import subprocess
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import re
import schedule
import time
def send_mail(subject, message):
mail_host = "smtp.163.com" mail_user = "[email protected]" mail_pass = "21897594"
sender = '[email protected]' receivers = '[email protected]'
message = MIMEText(message, 'plain', 'utf-8')
message['From'] = "[email protected]" message['To'] = "[email protected]" message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL()
        smtpObj.connect(mail_host, 465)
        smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("邮件发送成功")
except smtplib.SMTPException:
print("Error: 无法发送邮件")
def job():
    with open('content.txt', 'a') as f:
        f.write(time.asctime(time.localtime(time.time())) + '\n')
date = datetime.datetime.today().isoformat()[0:10]
status = subprocess.run(["git", "status"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(status)
print('**********start git add.**********')
gadd = subprocess.run(["git", "add", "."],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gadd)
print('**********git add done.**********')
print('**********start git commit.**********')
gcom = subprocess.run(["git", "commit", "-m" + date],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gcom)
print('**********git commit done.**********')
print('**********start git push.**********')
gpush = subprocess.run(["git", "push", "origin", "master"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gpush)
print('**********git push done.**********')
def main(h, m):
while True:
job()
break
while True:
now = datetime.datetime.now()
print(now.hour, ' ', now.minute, ' ', now.microsecond)
if now.hour == h and now.minute == m:
break
time.sleep(20)
print(time.asctime(time.localtime(time.time())))
main(23, 55)
| true | true |
1c499260da486610cee64c7a7a643e367163b5b2 | 4,052 | py | Python | objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null |
from screws.freeze.base import FrozenOnly
import numpy as np
class _2dCSCG_S2F_Reconstruct(FrozenOnly):
""""""
def __init__(self, f):
self._f_ = f
self._freeze_self_()
def __call__(self, xi, eta, ravel=False, i=None, vectorized=False, value_only=False):
"""
        Reconstruct the standard 2-form.
        Given ``xi`` and ``eta``, we reconstruct the 2-form on ``meshgrid(xi, eta)``
in all elements.
:param xi: A 1d iterable object of floats between -1 and 1.
:param eta: A 1d iterable object of floats between -1 and 1.
        :param i: (`default`:``None``) Do the reconstruction for element ``#i``. If it is ``None``,
            then do it for all elements.
:type i: int, None
:type xi: list, tuple, numpy.ndarray
:type eta: list, tuple, numpy.ndarray
:param bool ravel: (`default`:``False``) If we return 1d data?
:param vectorized:
:param value_only:
:returns: A tuple of outputs
1. (Dict[int, list]) -- :math:`x, y, z` coordinates.
2. (Dict[int, list]) -- Reconstructed values.
"""
f = self._f_
mesh = self._f_.mesh
xietasigma, basis = f.do.evaluate_basis_at_meshgrid(xi, eta)
#--- parse indices --------------------------------------------------
if i is None: # default, in all local mesh-elements.
INDICES = mesh.elements.indices
else:
if vectorized: vectorized = False
if isinstance(i ,int):
INDICES = [i, ]
else:
raise NotImplementedError()
#---- vectorized -----------------------------------------------
if vectorized:
assert INDICES == mesh.elements.indices, f"currently, vectorized computation only works" \
f"for full reconstruction."
det_iJ = mesh.elements.coordinate_transformation.vectorized.inverse_Jacobian(*xietasigma)
if len(INDICES) > 0:
if mesh.elements.IS.homogeneous_according_to_types_wrt_metric:
v = np.einsum('ij, ki, j -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = np.einsum('ij, ki, kj -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = None
if ravel:
pass
else:
raise NotImplementedError()
if value_only:
return (v,)
else:
raise Exception()
#----- non-vectorized ------------------------------------------------
else:
if value_only:
raise NotImplementedError()
else:
xyz = dict()
value = dict()
shape = [len(xi), len(eta)]
iJC = dict()
for i in INDICES:
element = mesh.elements[i]
typeWr2Metric = element.type_wrt_metric.mark
xyz[i] = element.coordinate_transformation.mapping(*xietasigma)
if typeWr2Metric in iJC:
basis_det_iJ = iJC[typeWr2Metric]
else:
det_iJ = element.coordinate_transformation.inverse_Jacobian(*xietasigma)
basis_det_iJ = basis[0] * det_iJ
if isinstance(typeWr2Metric, str):
iJC[typeWr2Metric] = basis_det_iJ
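                    # contract the basis values (n_basis x n_points) with the
                    # local cochain (n_basis,) to evaluate the form at the points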
v = np.einsum('ij, i -> j', basis_det_iJ, f.cochain.local[i], optimize='greedy')
if ravel:
value[i] = [v,]
else:
# noinspection PyUnresolvedReferences
xyz[i] = [xyz[i][j].reshape(shape, order='F') for j in range(2)]
value[i] = [v.reshape(shape, order='F'),]
return xyz, value | 38.961538 | 107 | 0.488648 |
from screws.freeze.base import FrozenOnly
import numpy as np
class _2dCSCG_S2F_Reconstruct(FrozenOnly):
def __init__(self, f):
self._f_ = f
self._freeze_self_()
def __call__(self, xi, eta, ravel=False, i=None, vectorized=False, value_only=False):
f = self._f_
mesh = self._f_.mesh
xietasigma, basis = f.do.evaluate_basis_at_meshgrid(xi, eta)
if i is None: INDICES = mesh.elements.indices
else:
if vectorized: vectorized = False
if isinstance(i ,int):
INDICES = [i, ]
else:
raise NotImplementedError()
if vectorized:
assert INDICES == mesh.elements.indices, f"currently, vectorized computation only works" \
f"for full reconstruction."
det_iJ = mesh.elements.coordinate_transformation.vectorized.inverse_Jacobian(*xietasigma)
if len(INDICES) > 0:
if mesh.elements.IS.homogeneous_according_to_types_wrt_metric:
v = np.einsum('ij, ki, j -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = np.einsum('ij, ki, kj -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = None
if ravel:
pass
else:
raise NotImplementedError()
if value_only:
return (v,)
else:
raise Exception()
else:
if value_only:
raise NotImplementedError()
else:
xyz = dict()
value = dict()
shape = [len(xi), len(eta)]
iJC = dict()
for i in INDICES:
element = mesh.elements[i]
typeWr2Metric = element.type_wrt_metric.mark
xyz[i] = element.coordinate_transformation.mapping(*xietasigma)
if typeWr2Metric in iJC:
basis_det_iJ = iJC[typeWr2Metric]
else:
det_iJ = element.coordinate_transformation.inverse_Jacobian(*xietasigma)
basis_det_iJ = basis[0] * det_iJ
if isinstance(typeWr2Metric, str):
iJC[typeWr2Metric] = basis_det_iJ
v = np.einsum('ij, i -> j', basis_det_iJ, f.cochain.local[i], optimize='greedy')
if ravel:
value[i] = [v,]
else:
xyz[i] = [xyz[i][j].reshape(shape, order='F') for j in range(2)]
value[i] = [v.reshape(shape, order='F'),]
return xyz, value | true | true |
1c4992765c22841944c3a0022d4163faee833f72 | 750 | py | Python | scheduleSynchronizer/urls.py | 497022407/Shifts-manager | beccb63c8622c015a9a453f586d4c3bb5d5066b9 | [
"Apache-2.0"
] | null | null | null | scheduleSynchronizer/urls.py | 497022407/Shifts-manager | beccb63c8622c015a9a453f586d4c3bb5d5066b9 | [
"Apache-2.0"
] | null | null | null | scheduleSynchronizer/urls.py | 497022407/Shifts-manager | beccb63c8622c015a9a453f586d4c3bb5d5066b9 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
# /
path('', views.home, name='home'),
path('guide', views.guide, name='guide'),
# TEMPORARY
path('signin', views.sign_in, name='signin'),
path('signout', views.sign_out, name='signout'),
path('calendar', views.calendar, name='calendar'),
path('shift', views.shift, name='shift'),
path('delete_incorrect_shifts', views.delete_incorrect_shifts,
name='delete_incorrect_shifts'),
path('search_function', views.search_function, name='search_function'),
path('delete_by_id', views.delete_by_id, name='delete_by_id'),
path('callback', views.callback, name='callback'),
path('calendar/new', views.newevent, name='newevent'),
]
| 32.608696 | 75 | 0.674667 | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('guide', views.guide, name='guide'),
path('signin', views.sign_in, name='signin'),
path('signout', views.sign_out, name='signout'),
path('calendar', views.calendar, name='calendar'),
path('shift', views.shift, name='shift'),
path('delete_incorrect_shifts', views.delete_incorrect_shifts,
name='delete_incorrect_shifts'),
path('search_function', views.search_function, name='search_function'),
path('delete_by_id', views.delete_by_id, name='delete_by_id'),
path('callback', views.callback, name='callback'),
path('calendar/new', views.newevent, name='newevent'),
]
| true | true |
1c49941779b1860c8f46a3c4c6efc4ea5ed1d14a | 374 | py | Python | onlinecourse/migrations/0002_auto_20220120_0050.py | jalvaradoWD/final-cloud-app-with-database | 9d3f814b68f24343b48c336dd4464764e805f0a5 | [
"Apache-2.0"
] | null | null | null | onlinecourse/migrations/0002_auto_20220120_0050.py | jalvaradoWD/final-cloud-app-with-database | 9d3f814b68f24343b48c336dd4464764e805f0a5 | [
"Apache-2.0"
] | null | null | null | onlinecourse/migrations/0002_auto_20220120_0050.py | jalvaradoWD/final-cloud-app-with-database | 9d3f814b68f24343b48c336dd4464764e805f0a5 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.3 on 2022-01-20 00:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='is_correct',
field=models.BooleanField(),
),
]
| 19.684211 | 47 | 0.588235 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='is_correct',
field=models.BooleanField(),
),
]
| true | true |
1c49946e54bff8f5ded1a44c434d7ff67635335a | 7,120 | py | Python | veriloggen/types/ram.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | null | null | null | veriloggen/types/ram.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | null | null | null | veriloggen/types/ram.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import copy
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from . import util
def mkRAMDefinition(name, datawidth=32, addrwidth=10, numports=2,
initvals=None, sync=True, with_enable=False,
nocheck_initvals=False, ram_style=None):
m = Module(name)
clk = m.Input('CLK')
interfaces = []
for i in range(numports):
interface = RAMSlaveInterface(
m, name + '_%d' % i, datawidth, addrwidth, with_enable=with_enable)
if sync:
interface.delay_addr = m.Reg(name + '_%d_daddr' % i, addrwidth)
interfaces.append(interface)
if ram_style is not None:
m.EmbeddedCode(ram_style)
mem = m.Reg('mem', datawidth, 2**addrwidth)
if initvals is not None:
if not isinstance(initvals, (tuple, list)):
raise TypeError("initvals must be tuple or list, not '%s" %
str(type(initvals)))
base = 16
if not nocheck_initvals:
new_initvals = []
for initval in initvals:
if isinstance(initval, int):
new_initvals.append(
vtypes.Int(initval, datawidth, base=16))
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, int):
v = copy.deepcopy(initval)
v.width = datawidth
v.base = base
new_initvals.append(v)
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, str):
v = copy.deepcopy(initval)
v.width = datawidth
if v.base != 2 and v.base != 16:
raise ValueError('base must be 2 or 16')
base = v.base
new_initvals.append(v)
else:
raise TypeError("values of initvals must be int, not '%s" %
str(type(initval)))
initvals = new_initvals
if 2 ** addrwidth > len(initvals):
initvals.extend(
[vtypes.Int(0, datawidth, base=base)
for _ in range(2 ** addrwidth - len(initvals))])
m.Initial(
*[mem[i](initval) for i, initval in enumerate(initvals)]
)
for interface in interfaces:
body = [
vtypes.If(interface.wenable)(
mem[interface.addr](interface.wdata)
)]
if sync:
body.append(interface.delay_addr(interface.addr))
if with_enable:
body = vtypes.If(interface.enable)(*body)
m.Always(vtypes.Posedge(clk))(
body
)
if sync:
m.Assign(interface.rdata(mem[interface.delay_addr]))
else:
m.Assign(interface.rdata(mem[interface.addr]))
return m
class RAMInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, addrwidth=10,
itype=None, otype=None,
p_addr='addr', p_rdata='rdata',
p_wdata='wdata', p_wenable='wenable',
p_enable='enable',
with_enable=False, index=None):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_addr = p_addr if name is None else '_'.join([name, p_addr])
name_rdata = p_rdata if name is None else '_'.join([name, p_rdata])
name_wdata = p_wdata if name is None else '_'.join([name, p_wdata])
name_wenable = (
p_wenable if name is None else '_'.join([name, p_wenable]))
if with_enable:
name_enable = (
p_enable if name is None else '_'.join([name, p_enable]))
if index is not None:
name_addr = name_addr + str(index)
name_rdata = name_rdata + str(index)
name_wdata = name_wdata + str(index)
name_wenable = name_wenable + str(index)
if with_enable:
name_enable = name_enable + str(index)
self.addr = util.make_port(m, itype, name_addr, addrwidth, initval=0)
self.rdata = util.make_port(m, otype, name_rdata, datawidth, initval=0)
self.wdata = util.make_port(m, itype, name_wdata, datawidth, initval=0)
self.wenable = util.make_port(m, itype, name_wenable, initval=0)
if with_enable:
self.enable = util.make_port(m, itype, name_enable, initval=0)
def connect(self, targ):
self.addr.connect(targ.addr)
targ.rdata.connect(self.rdata)
self.wdata.connect(targ.wdata)
self.wenable.connect(targ.wenable)
if hasattr(self, 'enable'):
if hasattr(targ, 'enable'):
self.enable.connect(targ.enable)
else:
self.enable.connect(1)
else:
if hasattr(targ, 'enable'):
raise ValueError('no enable port')
class RAMSlaveInterface(RAMInterface):
_I = 'Input'
_O = 'Output'
class RAMMasterInterface(RAMInterface):
_I = 'Output'
_O = 'Input'
class _RAM_RTL(object):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, sync=True, with_enable=False):
self.m = m
self.name = name
self.clk = clk
self.with_enable = with_enable
self.interfaces = [RAMInterface(m, name + '_%d' % i, datawidth, addrwidth,
itype='Wire', otype='Wire', with_enable=with_enable)
for i in range(numports)]
ram_def = mkRAMDefinition(name, datawidth, addrwidth, numports,
initvals, sync, with_enable)
self.m.Instance(ram_def, name,
params=(), ports=m.connect_ports(ram_def))
def connect(self, port, addr, wdata, wenable, enable=None):
self.m.Assign(self.interfaces[port].addr(addr))
self.m.Assign(self.interfaces[port].wdata(wdata))
self.m.Assign(self.interfaces[port].wenable(wenable))
if self.with_enable:
self.m.Assign(self.interfaces[port].enable(enable))
def rdata(self, port):
return self.interfaces[port].rdata
class SyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=True, with_enable=with_enable)
class AsyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=False)
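# Minimal usage sketch (an assumption for illustration, not part of this
# module): build a one-port synchronous RAM inside a parent Module `m`
# driven by clock `clk`:
#   ram = SyncRAM(m, 'ram_a', clk, datawidth=32, addrwidth=10, numports=1)
#   ram.connect(0, addr, wdata, wenable)
#   rdata = ram.rdata(0)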
| 33.584906 | 92 | 0.556039 | from __future__ import absolute_import
from __future__ import print_function
import copy
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from . import util
def mkRAMDefinition(name, datawidth=32, addrwidth=10, numports=2,
initvals=None, sync=True, with_enable=False,
nocheck_initvals=False, ram_style=None):
m = Module(name)
clk = m.Input('CLK')
interfaces = []
for i in range(numports):
interface = RAMSlaveInterface(
m, name + '_%d' % i, datawidth, addrwidth, with_enable=with_enable)
if sync:
interface.delay_addr = m.Reg(name + '_%d_daddr' % i, addrwidth)
interfaces.append(interface)
if ram_style is not None:
m.EmbeddedCode(ram_style)
mem = m.Reg('mem', datawidth, 2**addrwidth)
if initvals is not None:
if not isinstance(initvals, (tuple, list)):
raise TypeError("initvals must be tuple or list, not '%s" %
str(type(initvals)))
base = 16
if not nocheck_initvals:
new_initvals = []
for initval in initvals:
if isinstance(initval, int):
new_initvals.append(
vtypes.Int(initval, datawidth, base=16))
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, int):
v = copy.deepcopy(initval)
v.width = datawidth
v.base = base
new_initvals.append(v)
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, str):
v = copy.deepcopy(initval)
v.width = datawidth
if v.base != 2 and v.base != 16:
raise ValueError('base must be 2 or 16')
base = v.base
new_initvals.append(v)
else:
raise TypeError("values of initvals must be int, not '%s" %
str(type(initval)))
initvals = new_initvals
if 2 ** addrwidth > len(initvals):
initvals.extend(
[vtypes.Int(0, datawidth, base=base)
for _ in range(2 ** addrwidth - len(initvals))])
m.Initial(
*[mem[i](initval) for i, initval in enumerate(initvals)]
)
for interface in interfaces:
body = [
vtypes.If(interface.wenable)(
mem[interface.addr](interface.wdata)
)]
if sync:
body.append(interface.delay_addr(interface.addr))
if with_enable:
body = vtypes.If(interface.enable)(*body)
m.Always(vtypes.Posedge(clk))(
body
)
if sync:
m.Assign(interface.rdata(mem[interface.delay_addr]))
else:
m.Assign(interface.rdata(mem[interface.addr]))
return m
class RAMInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, addrwidth=10,
itype=None, otype=None,
p_addr='addr', p_rdata='rdata',
p_wdata='wdata', p_wenable='wenable',
p_enable='enable',
with_enable=False, index=None):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_addr = p_addr if name is None else '_'.join([name, p_addr])
name_rdata = p_rdata if name is None else '_'.join([name, p_rdata])
name_wdata = p_wdata if name is None else '_'.join([name, p_wdata])
name_wenable = (
p_wenable if name is None else '_'.join([name, p_wenable]))
if with_enable:
name_enable = (
p_enable if name is None else '_'.join([name, p_enable]))
if index is not None:
name_addr = name_addr + str(index)
name_rdata = name_rdata + str(index)
name_wdata = name_wdata + str(index)
name_wenable = name_wenable + str(index)
if with_enable:
name_enable = name_enable + str(index)
self.addr = util.make_port(m, itype, name_addr, addrwidth, initval=0)
self.rdata = util.make_port(m, otype, name_rdata, datawidth, initval=0)
self.wdata = util.make_port(m, itype, name_wdata, datawidth, initval=0)
self.wenable = util.make_port(m, itype, name_wenable, initval=0)
if with_enable:
self.enable = util.make_port(m, itype, name_enable, initval=0)
def connect(self, targ):
self.addr.connect(targ.addr)
targ.rdata.connect(self.rdata)
self.wdata.connect(targ.wdata)
self.wenable.connect(targ.wenable)
if hasattr(self, 'enable'):
if hasattr(targ, 'enable'):
self.enable.connect(targ.enable)
else:
self.enable.connect(1)
else:
if hasattr(targ, 'enable'):
raise ValueError('no enable port')
class RAMSlaveInterface(RAMInterface):
_I = 'Input'
_O = 'Output'
class RAMMasterInterface(RAMInterface):
_I = 'Output'
_O = 'Input'
class _RAM_RTL(object):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, sync=True, with_enable=False):
self.m = m
self.name = name
self.clk = clk
self.with_enable = with_enable
self.interfaces = [RAMInterface(m, name + '_%d' % i, datawidth, addrwidth,
itype='Wire', otype='Wire', with_enable=with_enable)
for i in range(numports)]
ram_def = mkRAMDefinition(name, datawidth, addrwidth, numports,
initvals, sync, with_enable)
self.m.Instance(ram_def, name,
params=(), ports=m.connect_ports(ram_def))
def connect(self, port, addr, wdata, wenable, enable=None):
self.m.Assign(self.interfaces[port].addr(addr))
self.m.Assign(self.interfaces[port].wdata(wdata))
self.m.Assign(self.interfaces[port].wenable(wenable))
if self.with_enable:
self.m.Assign(self.interfaces[port].enable(enable))
def rdata(self, port):
return self.interfaces[port].rdata
class SyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=True, with_enable=with_enable)
class AsyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=False)
| true | true |
1c499523c6731619a57785345e99096f3cd43458 | 3,402 | py | Python | tapiriik/services/ratelimiting.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | 11 | 2019-08-05T15:38:25.000Z | 2022-03-12T09:50:02.000Z | tapiriik/services/ratelimiting.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | 31 | 2019-03-05T20:38:11.000Z | 2022-03-21T09:41:23.000Z | tapiriik/services/ratelimiting.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | 8 | 2019-03-05T08:20:07.000Z | 2021-08-18T08:20:17.000Z | from tapiriik.database import ratelimit as rl_db, redis
from tapiriik.settings import _GLOBAL_LOGGER
from pymongo.read_preferences import ReadPreference
from datetime import datetime, timedelta
import math
import logging
class RateLimitExceededException(Exception):
pass
class RateLimit:
def Limit(key):
current_limits = rl_db.limits.find({"Key": key}, {"Max": 1, "Count": 1})
for limit in current_limits:
if limit["Max"] < limit["Count"]:
# We can't continue without exceeding this limit
# Don't want to halt the synchronization worker to wait for 15min-1 hour
# So...
raise RateLimitExceededException()
_GLOBAL_LOGGER.info("Adding 1 to count")
rl_db.limits.update_many({"Key": key}, {"$inc": {"Count": 1}})
def Refresh(key, limits):
# Limits is in format [(timespan, max-count),...]
# The windows are anchored at midnight
# The timespan is used to uniquely identify limit instances between runs
midnight = datetime.combine(datetime.utcnow().date(), datetime.min.time())
time_since_midnight = (datetime.utcnow() - midnight)
rl_db.limits.delete_many({"Key": key, "Expires": {"$lt": datetime.utcnow()}})
current_limits = list(rl_db.limits.with_options(read_preference=ReadPreference.PRIMARY).find({"Key": key}, {"Duration": 1}))
missing_limits = [x for x in limits if x[0].total_seconds() not in [limit["Duration"] for limit in current_limits]]
for limit in missing_limits:
window_start = midnight + timedelta(seconds=math.floor(time_since_midnight.total_seconds()/limit[0].total_seconds()) * limit[0].total_seconds())
window_end = window_start + limit[0]
rl_db.limits.insert({"Key": key, "Count": 0, "Duration": limit[0].total_seconds(), "Max": limit[1], "Expires": window_end})
class RedisRateLimit:
def IsOneRateLimitReached(rate_limited_services):
for svc in rate_limited_services:
for limit in svc.GlobalRateLimits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = svc.ID+":lm:"+str(limit_timedelta_seconds)
actual_limit = redis.get(limit_key)
if actual_limit != None:
if int(actual_limit.decode('utf-8')) >= (limit_number * 0.95):
return True
return False
def Limit(key, limits):
for limit in limits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = key+":lm:"+str(limit_timedelta_seconds)
# Increasing the key by one
# If it does not exist or it has expired it will be set to one
# The incr function of redis is atomic and "SHOULD" not create race condition
actual_rl = redis.incr(limit_key)
            # The key expiry time is determined by:
# - now in UNIX epoch floor divided by limit_timedelta_seconds
# - added by one to simulate a ceil division
# - multiplied by limit_timedelta_seconds to set this back in an UNIX epoch timestamp
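            #   worked example: now = 1000 s, window = 900 s:
            #   (1000 // 900 + 1) * 900 = 1800, i.e. the key expires at the
            #   next 900-second boundary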
redis.expireat(limit_key, ((int(datetime.now().strftime('%s')) // limit_timedelta_seconds)+1) * limit_timedelta_seconds)
            # We might lose one API call here, but this is a safety margin in case an unexpected race condition happens
            # Better safe than sorry :)
if actual_rl >= limit_number-1:
raise RateLimitExceededException("Actual rate limit : %s / Max rate limit : %s" % (actual_rl, limit_number))
_GLOBAL_LOGGER.info("Adding 1 to %s %s limit count. It is now %s/%s" % (key, limit_number, actual_rl, limit_number)) | 46.60274 | 147 | 0.726925 | from tapiriik.database import ratelimit as rl_db, redis
from tapiriik.settings import _GLOBAL_LOGGER
from pymongo.read_preferences import ReadPreference
from datetime import datetime, timedelta
import math
import logging
class RateLimitExceededException(Exception):
pass
class RateLimit:
def Limit(key):
current_limits = rl_db.limits.find({"Key": key}, {"Max": 1, "Count": 1})
for limit in current_limits:
if limit["Max"] < limit["Count"]:
# Don't want to halt the synchronization worker to wait for 15min-1 hour
raise RateLimitExceededException()
_GLOBAL_LOGGER.info("Adding 1 to count")
rl_db.limits.update_many({"Key": key}, {"$inc": {"Count": 1}})
def Refresh(key, limits):
midnight = datetime.combine(datetime.utcnow().date(), datetime.min.time())
time_since_midnight = (datetime.utcnow() - midnight)
rl_db.limits.delete_many({"Key": key, "Expires": {"$lt": datetime.utcnow()}})
current_limits = list(rl_db.limits.with_options(read_preference=ReadPreference.PRIMARY).find({"Key": key}, {"Duration": 1}))
missing_limits = [x for x in limits if x[0].total_seconds() not in [limit["Duration"] for limit in current_limits]]
for limit in missing_limits:
window_start = midnight + timedelta(seconds=math.floor(time_since_midnight.total_seconds()/limit[0].total_seconds()) * limit[0].total_seconds())
window_end = window_start + limit[0]
rl_db.limits.insert({"Key": key, "Count": 0, "Duration": limit[0].total_seconds(), "Max": limit[1], "Expires": window_end})
class RedisRateLimit:
def IsOneRateLimitReached(rate_limited_services):
for svc in rate_limited_services:
for limit in svc.GlobalRateLimits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = svc.ID+":lm:"+str(limit_timedelta_seconds)
actual_limit = redis.get(limit_key)
if actual_limit != None:
if int(actual_limit.decode('utf-8')) >= (limit_number * 0.95):
return True
return False
def Limit(key, limits):
for limit in limits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = key+":lm:"+str(limit_timedelta_seconds)
actual_rl = redis.incr(limit_key)
redis.expireat(limit_key, ((int(datetime.now().strftime('%s')) // limit_timedelta_seconds)+1) * limit_timedelta_seconds)
if actual_rl >= limit_number-1:
raise RateLimitExceededException("Actual rate limit : %s / Max rate limit : %s" % (actual_rl, limit_number))
_GLOBAL_LOGGER.info("Adding 1 to %s %s limit count. It is now %s/%s" % (key, limit_number, actual_rl, limit_number)) | true | true |
1c4995509deb8fbfcad4283c6a2e9e2fcf5fef57 | 13 | py | Python | ProximityScore/ProximityScore.py | IndyMPO/IndyGeoprocessingTools | 968f9befc37252e065e8d8085c0d10f17a871152 | [
"Apache-2.0"
] | null | null | null | ProximityScore/ProximityScore.py | IndyMPO/IndyGeoprocessingTools | 968f9befc37252e065e8d8085c0d10f17a871152 | [
"Apache-2.0"
] | 3 | 2016-08-30T16:10:20.000Z | 2016-09-06T15:32:44.000Z | ProximityScore/ProximityScore.py | IndyMPO/IndyGeoprocessingTools | 968f9befc37252e065e8d8085c0d10f17a871152 | [
"Apache-2.0"
] | null | null | null | import arcpy
| 6.5 | 12 | 0.846154 | import arcpy
| true | true |
1c49969fcd2408de0311767f86ec53448a5425dc | 80 | py | Python | grokproject/__init__.py | zopefoundation/grokproject | 78d00bded86dbc1cf8ed2f561c8221eda7e68e7a | [
"ZPL-2.1"
] | 4 | 2015-12-05T05:47:56.000Z | 2017-08-22T13:45:02.000Z | grokproject/__init__.py | zopefoundation/grokproject | 78d00bded86dbc1cf8ed2f561c8221eda7e68e7a | [
"ZPL-2.1"
] | 12 | 2015-12-03T11:58:01.000Z | 2018-01-23T13:29:25.000Z | grokproject/__init__.py | zopefoundation/grokproject | 78d00bded86dbc1cf8ed2f561c8221eda7e68e7a | [
"ZPL-2.1"
] | 5 | 2016-03-21T10:23:36.000Z | 2020-09-27T02:47:31.000Z | from grokproject.templates import GrokProject
from grokproject.main import main
| 26.666667 | 45 | 0.875 | from grokproject.templates import GrokProject
from grokproject.main import main
| true | true |
1c4996ec7c2dfcc43f4cb7feb849a3a3828a477f | 858 | py | Python | Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py | Anoop01234/Go-Travel | aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8 | [
"MIT"
] | null | null | null | Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py | Anoop01234/Go-Travel | aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8 | [
"MIT"
] | null | null | null | Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py | Anoop01234/Go-Travel | aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8 | [
"MIT"
] | 1 | 2021-12-21T17:27:34.000Z | 2021-12-21T17:27:34.000Z | # Generated by Django 3.0.7 on 2020-06-23 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=50)),
('message', models.TextField()),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
]
| 28.6 | 114 | 0.538462 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=50)),
('message', models.TextField()),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
]
| true | true |
1c4997d7ab42517d38d4ecafb5aa2ac189d46bdc | 1,885 | py | Python | inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 5 | 2021-12-13T21:19:31.000Z | 2022-01-18T18:29:43.000Z | inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 45 | 2021-12-30T17:21:09.000Z | 2022-03-29T22:47:32.000Z | inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 4 | 2022-01-26T17:42:54.000Z | 2022-03-30T04:48:04.000Z | """
Unit tests for the cloudadapter file
"""
import unittest
import mock
import sys
import cloudadapter.cloudadapter as cloudadapter
from cloudadapter.cloudadapter import CloudAdapter
from cloudadapter.exceptions import BadConfigError
class TestCloudAdapter(unittest.TestCase):
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_starts_client_succeeds(self, MockClient, MockWaiter, mock_fileConfig):
cloudadapter.main()
assert MockClient.return_value.start.call_count == 1
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.logging', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_logs_and_exits_client_error_succeeds(
self, MockClient, MockWaiter, mock_logging, mock_fileConfig):
MockClient.side_effect = BadConfigError("Error!")
mock_logger = mock_logging.getLogger.return_value
cloudadapter.main()
if sys.version_info >= (3, 6):
assert mock_logger.error.call_count == 1
else:
assert mock_logger.error.call_count == 2
assert MockClient.return_value.start.call_count == 0
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_service_name_prefixed_inbm(self, MockClient, MockWaiter, mock_fileConfig):
ca = CloudAdapter()
self.assertFalse(' ' in ca._svc_name_)
        self.assertEqual(ca._svc_name_.split('-')[0], 'inbm')
| 39.270833 | 96 | 0.741114 |
import unittest
import mock
import sys
import cloudadapter.cloudadapter as cloudadapter
from cloudadapter.cloudadapter import CloudAdapter
from cloudadapter.exceptions import BadConfigError
class TestCloudAdapter(unittest.TestCase):
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_starts_client_succeeds(self, MockClient, MockWaiter, mock_fileConfig):
cloudadapter.main()
assert MockClient.return_value.start.call_count == 1
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.logging', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_logs_and_exits_client_error_succeeds(
self, MockClient, MockWaiter, mock_logging, mock_fileConfig):
MockClient.side_effect = BadConfigError("Error!")
mock_logger = mock_logging.getLogger.return_value
cloudadapter.main()
if sys.version_info >= (3, 6):
assert mock_logger.error.call_count == 1
else:
assert mock_logger.error.call_count == 2
assert MockClient.return_value.start.call_count == 0
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_service_name_prefixed_inbm(self, MockClient, MockWaiter, mock_fileConfig):
ca = CloudAdapter()
self.assertFalse(' ' in ca._svc_name_)
        self.assertEqual(ca._svc_name_.split('-')[0], 'inbm')
| true | true |
1c4997ef0d7b724a972be742252a3257c5688768 | 660 | py | Python | videochat/pyserver/manage.py | GenBInc/quickhellou | fb97f995904a8397c631a7256f86905c5b16a7c0 | [
"MIT"
] | 1 | 2022-03-31T13:18:41.000Z | 2022-03-31T13:18:41.000Z | videochat/pyserver/manage.py | GenBInc/quickhellou | fb97f995904a8397c631a7256f86905c5b16a7c0 | [
"MIT"
] | null | null | null | videochat/pyserver/manage.py | GenBInc/quickhellou | fb97f995904a8397c631a7256f86905c5b16a7c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qhv2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.695652 | 73 | 0.677273 | import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qhv2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c4998e95581100279df8ab9274b91042ee7fc13 | 479 | py | Python | software/old_stuff/lm75.py | 84ace/esp32_smart_keezer | 48f13ab377de82d3eea2c7a769ff3f82ff48fdd9 | [
"MIT"
] | 1 | 2022-01-27T21:30:10.000Z | 2022-01-27T21:30:10.000Z | software/old_stuff/lm75.py | 84ace/esp32_smart_keezer | 48f13ab377de82d3eea2c7a769ff3f82ff48fdd9 | [
"MIT"
] | null | null | null | software/old_stuff/lm75.py | 84ace/esp32_smart_keezer | 48f13ab377de82d3eea2c7a769ff3f82ff48fdd9 | [
"MIT"
] | null | null | null | from math import floor
class LM75(object):
    ADDRESS = 0x48 # LM75 bus address
    FREQUENCY = 100000 # I2C bus frequency
    def __init__(self, i2c):
        # keep a reference to the I2C bus object (e.g. a MicroPython
        # machine.I2C instance) used by get_output()
        self.i2c = i2c
def get_output(self):
"""Return raw output from the LM75 sensor."""
output = self.i2c.readfrom(self.ADDRESS, 2)
return output[0], output[1]
def get_temp(self):
"""Return a tuple of (temp_c, point)."""
temp = self.get_output()
        return int(temp[0]), floor(int(temp[1]) / 23) | 28.176471 | 53 | 0.580376 | from math import floor
class LM75(object):
    ADDRESS = 0x48
    FREQUENCY = 100000
    def __init__(self, i2c):
        self.i2c = i2c
def get_output(self):
output = self.i2c.readfrom(self.ADDRESS, 2)
return output[0], output[1]
def get_temp(self):
temp = self.get_output()
return int(temp[0]), floor(int(temp[1]) / 23) | true | true |
1c499a6ec3f70ca7497e6aebb8e03ced8e3f52ca | 91 | py | Python | framenet/apps.py | henryyang42/lifelog_annotation | 586f44132508f59e97dda701bd5602d26b79a6f4 | [
"MIT"
] | null | null | null | framenet/apps.py | henryyang42/lifelog_annotation | 586f44132508f59e97dda701bd5602d26b79a6f4 | [
"MIT"
] | null | null | null | framenet/apps.py | henryyang42/lifelog_annotation | 586f44132508f59e97dda701bd5602d26b79a6f4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class FramenetConfig(AppConfig):
name = 'framenet'
| 15.166667 | 33 | 0.758242 | from django.apps import AppConfig
class FramenetConfig(AppConfig):
name = 'framenet'
| true | true |
1c499b1f6bad0e0e9467d88057d523b47ce4dcbc | 1,301 | py | Python | notifications/utils/models/tests.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | 2 | 2022-01-24T23:30:18.000Z | 2022-01-26T00:21:22.000Z | notifications/utils/models/tests.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | notifications/utils/models/tests.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | """
Created: 10 June 2020
"""
from revibe._helpers.test import RevibeTestCase
from notifications.models import Notification
from notifications.utils.models.notification import create_notification_uuid, mark_email_as_read
# -----------------------------------------------------------------------------
class NotificationUtilsTestCase(RevibeTestCase):
def setUp(self):
self._get_application()
self._get_user()
self._get_external_event()
self._get_external_event_template()
def test_create_notification_uuid(self):
# create notifications with IDs
pass
def test_mark_email_as_read(self):
# create notification
notif_tracking_id = create_notification_uuid()
Notification.objects.create(event_template=self.external_event_template, user=self.user, read_id=notif_tracking_id)
# call function
mark_email_as_read(notif_tracking_id)
# check stuff
notif = Notification.objects.get(read_id=notif_tracking_id)
self.assertTrue(
notif.seen,
msg="Notification 'seen' field has not been changed"
)
self.assertEqual(
bool(notif.date_seen), True,
msg="The notification 'date_seen' field has not been updated"
)
| 28.282609 | 123 | 0.651038 |
from revibe._helpers.test import RevibeTestCase
from notifications.models import Notification
from notifications.utils.models.notification import create_notification_uuid, mark_email_as_read
class NotificationUtilsTestCase(RevibeTestCase):
def setUp(self):
self._get_application()
self._get_user()
self._get_external_event()
self._get_external_event_template()
def test_create_notification_uuid(self):
pass
def test_mark_email_as_read(self):
notif_tracking_id = create_notification_uuid()
Notification.objects.create(event_template=self.external_event_template, user=self.user, read_id=notif_tracking_id)
mark_email_as_read(notif_tracking_id)
notif = Notification.objects.get(read_id=notif_tracking_id)
self.assertTrue(
notif.seen,
msg="Notification 'seen' field has not been changed"
)
self.assertEqual(
bool(notif.date_seen), True,
msg="The notification 'date_seen' field has not been updated"
)
| true | true |
1c499da0501c9aa0f795e1fbd833c83eb2effa66 | 35,333 | py | Python | 2017/pointing_lmt2017.py | sao-eht/lmtscripts | bbf31c859a8e04c4c95d09679112de574baa2382 | [
"MIT"
] | 1 | 2017-04-11T05:05:24.000Z | 2017-04-11T05:05:24.000Z | 2017/pointing_lmt2017.py | sao-eht/lmtscripts | bbf31c859a8e04c4c95d09679112de574baa2382 | [
"MIT"
] | null | null | null | 2017/pointing_lmt2017.py | sao-eht/lmtscripts | bbf31c859a8e04c4c95d09679112de574baa2382 | [
"MIT"
] | null | null | null | import numpy
import matplotlib
import shutil
# matplotlib.use('agg')
from matplotlib import pylab, mlab, pyplot
import os
np = numpy
plt = pyplot
# plt.ion()
from argparse import Namespace
from glob import glob
import scipy.io
from scipy.signal import butter,lfilter,freqz
from scipy.interpolate import interp1d
#from scipy.ndimage.filters import minimum_filter1d
from scipy.interpolate import UnivariateSpline
from matplotlib.mlab import griddata, psd
from datetime import datetime, timedelta
from scipy.optimize import fmin
#pathname = '../data_lmt/2017/vlbi1mm_*%06d*.nc'
pathname = '/data_lmt/vlbi1mm/vlbi1mm_*%06d*.nc'
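# scan data files are presumably found by substituting a scan number, e.g.
# glob(pathname % 12345) (illustrative; the actual lookup happens later in
# this module)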
def asec2rad(asec):
return asec * 2*np.pi / 3600. / 360.
def rad2asec(rad):
return rad * 3600. * 360. / (2*np.pi)
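# quick sanity check (illustrative): the two conversions are inverses,
#   rad2asec(asec2rad(1.0)) == 1.0 up to floating point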
###################
def focus(first, last, plot=False, point=False, win_pointing=5., win_focusing=5., res=2., fwhm=11., channel='b', z0search=20., alphasearch=20., disk_diameter=0.):
plt.close('all')
if point:
print 'pointing'
out = pointing_lmt2017(first, last=last, plot=plot, win=win_pointing, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
imax = np.argmax(out.snr.ravel())
(xmax, ymax) = (out.xx.ravel()[imax], out.yy.ravel()[imax])
else:
xmax = 0.
ymax = 0.
scans = range(first, last+1)
focus_subset(scans, x0=xmax, y0=ymax, plot=plot, win_pointing=win_pointing, win_focusing=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch, disk_diameter=disk_diameter)
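# typical interactive call (scan numbers below are placeholders):
#   focus(70000, 70004, plot=True, point=True, channel='b')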
def focus_subset(scans, x0=0., y0=0., plot=False, win_pointing=50., win_focusing=5., res=2., fwhm=11., channel='b', z0search=20., alphasearch=20., disk_diameter=0.):
focusing_parabolicfit_lmt2017(scans, plot=plot, win=win_pointing, channel=channel, disk_diameter=disk_diameter)
focusing_matchfilter_lmt2017(scans, x0=x0, y0=y0, win=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch)
###########################################################
# Based on scitools meshgrid
def meshgrid_lmtscripts(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
################### EXTRACT INFORMATION ###################
# extract 1mm total power data and fix some timing jitter issues
def extract(nc):
t0 = nc.variables['Data.Sky.Time'].data[0]
t = nc.variables['Data.Sky.Time'].data - t0
a = nc.variables['Data.Vlbi1mmTpm.APower'].data
b = nc.variables['Data.Vlbi1mmTpm.BPower'].data
x = nc.variables['Data.Sky.XPos'].data
y = nc.variables['Data.Sky.YPos'].data
i = ~nc.variables['Data.Dcs.BufPos'].data.astype(np.bool)
iobs = nc.variables['Header.Dcs.ObsNum'].data
if iobs >= 39150: # move to 50 Hz sampling to avoid ADC time glitches
fs = 50.
tnew = nc.variables['Data.Vlbi1mmTpm.Time'].data - nc.variables['Data.Vlbi1mmTpm.Time'].data[0]
idx = tnew <= t[-1]
a = a[idx]
b = b[idx]
tnew = tnew[idx]
elif iobs >= 38983: # kamal includes gap times
tnew = np.linspace(0, t[-1], len(t))
fs = 1./(t[1]-t[0])
adctime = nc.variables['Data.Vlbi1mmTpm.Time'].data - nc.variables['Data.Vlbi1mmTpm.Time'].data[0]
tnew = np.linspace(0, adctime[-1], len(adctime))
tnew = tnew[(tnew <= t[-1])]
a = interp1d(adctime, a)(tnew)
b = interp1d(adctime, b)(tnew)
elif iobs >= 38915: # 83.3 Hz becomes available but has gaps
fs = 1./0.012
tnew = np.arange(0, t[-1] + 1e-6, 1./fs)
        a = interp1d(t, a)(tnew) # t is not a great variable to use, but all we have
        b = interp1d(t, b)(tnew) # t is not a great variable to use, but all we have
else: # we are in 10 Hz data
fs = 10.
tnew = np.arange(0, t[-1] + 1e-6, .10)
a = interp1d(t, a)(tnew)
b = interp1d(t, b)(tnew)
x = interp1d(t, x)(tnew)
y = interp1d(t, y)(tnew)
i = interp1d(t, i)(tnew).astype(bool)
t = tnew
#iobs = nc.hdu.header.ObsNum[0]
source = ''.join(nc.variables['Header.Source.SourceName'])
return Namespace(t0=t0, t=t, a=a, b=b, x=x, y=y, i=i, iobs=iobs, source=source, fs=fs)
def rawopen(iobs):
from scipy.io import netcdf
filename = glob(pathname % iobs)[-1]
nc = netcdf.netcdf_file(filename)
# keep = dict((name.split('.')[-1], val.data) for (name, val) in nc.variables.items()
# if name[:4] == 'Data')
keep = Namespace()
keep.BufPos = nc.variables['Data.Dcs.BufPos'].data
keep.Time = nc.variables['Data.Sky.Time'].data
keep.XPos = nc.variables['Data.Sky.XPos'].data
keep.YPos = nc.variables['Data.Sky.YPos'].data
keep.APower = nc.variables['Data.Vlbi1mmTpm.APower'].data
keep.BPower = nc.variables['Data.Vlbi1mmTpm.BPower'].data
keep.nc = nc
if 'Data.Vlbi1mmTpm.Time' in nc.variables:
keep.ADCTime = nc.variables['Data.Vlbi1mmTpm.Time'].data
return keep
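# Typical access pattern for the two readers above (obs number is hypothetical):
#   keep = rawopen(39200)      # raw netCDF variables for one scan
#   scan = extract(keep.nc)    # resampled power + pointing time series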
# patch together many scans and try to align in time (to the sample -- to keep X and Y)
def mfilt(scans):
aps = []
bps = []
xs = []
ys = []
ts = []
ss = []
fss = []
zs = []
ntaper = 100
for i in sorted(scans):
keep = rawopen(i)
scan = extract(keep.nc)
aps.append(detrend(scan.a, ntaper=ntaper))
bps.append(detrend(scan.b, ntaper=ntaper))
ts.append(scan.t + scan.t0)
xs.append(scan.x)
ys.append(scan.y)
ss.append(scan.source)
fss.append(scan.fs)
zs.append(keep.nc.variables['Header.M2.ZReq'].data)
flag = 1
for s1 in range(0,len(ss)):
for s2 in range(s1,len(ss)):
if (ss[s1] != ss[s2]):
flag = 0
print('WARNING: NOT THE SAME SOURCE!!')
print ss
break
s = ss[0]
fs = fss[0]
t0 = ts[0][0]
t1 = ts[-1][-1]
tnew = np.arange(t0, t1+1./fs, 1./fs)
idx = np.zeros(len(tnew), dtype=np.bool)
x = np.zeros(len(tnew))
y = np.zeros(len(tnew))
a = np.zeros(len(tnew))
b = np.zeros(len(tnew))
for i in range(len(ts)):
        istart = int(np.round((ts[i][0] - t0) * fs)) # sample index on the common fs grid
idx[istart:istart+len(ts[i])] = True
x[istart:istart+len(xs[i])] = xs[i][:len(x)-istart]
y[istart:istart+len(ys[i])] = ys[i][:len(y)-istart]
a[istart:istart+len(aps[i])] = aps[i][:len(a)-istart]
b[istart:istart+len(bps[i])] = bps[i][:len(b)-istart]
x[~idx] = np.inf
y[~idx] = np.inf
fillfrac = float(np.sum(idx)-ntaper*len(scans)) / len(tnew)
return Namespace(t=tnew, a=a, b=b, x=x, y=y, z=zs, idx=idx, source=s, fs=fs, fillfrac=fillfrac)
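# Note on mfilt(): scans are stitched onto one uniform grid at the common sample
# rate; samples in the gaps are flagged idx=False and get x = y = inf so no
# pointing model can match them, and fillfrac records the usable fraction that
# later rescales the PSD in whiten_measurements().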
################### POINTING & FOCUSING ###################
def pointing_lmt2017(first, last=None, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
if last is None:
last = first
scans = range(first, last+1)
out = pointing_lmt2017_wrapper(scans, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
return out
def pointing_lmt2017_wrapper(scans, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
############## pointing #############
z = mfilt(scans)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(z.x))))
# get the prob of a each location in the map being the point source and rename the variables
out = fitmodel_lmt2017(z, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, pcum) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
############## compute statistics #############
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the source voltage within 3 sigma
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
############## plotting #############
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(v, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
h1 = plt.contour(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
imax = np.argmax(snr.ravel())
(xmax, ymax) = (xxa.ravel()[imax], yya.ravel()[imax])
plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (xmax, ymax), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f $\pm$ %.1f mV]' % (sourcevoltage_expvalue, sourcevoltage_stdev), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scans:' + str(scans[0]) + '-' + str(scans[-1]))
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
############## return #############
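    # The Namespace returned by fitmodel_lmt2017 carries the SNR map in
    # noise-sigma units (snr), the amplitude map in mV (v), and per-pixel
    # probabilities (prob, pcum) that drive the sigma contours drawn above.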
return out
def focusing_parabolicfit_lmt2017(scans, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan).nc.variables['Header.M2.ZReq'].data)
out = pointing_lmt2017(scan, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, cumulative_prob) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
# KATIE: DOESN'T THIS ONLY WORK IF YOU ONLY HAVE 1 PEAK???
        # get the indices of the points on the map within 3 sigma and extract those voltages and probabilities
indices_3sigma = (cumulative_prob.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the source voltage within 3 sigma
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
vmeans.append(sourcevoltage_expvalue)
vstds.append(sourcevoltage_stdev)
plt.figure(); plt.errorbar(z_position, vmeans, yerr=vstds)
############ LEAST SQUARES FITTING ################
A = np.vstack([np.ones([1, len(z_position)]), np.array(z_position), np.array(z_position)**2]).T
meas = np.array(vmeans)
meas_cov = np.diag(np.array(vstds)**2)
polydeg = 2
scale = 1e5
polyparams_cov = scale*np.eye(polydeg+1)
polyparams_mean = np.zeros([polydeg+1])
intTerm = np.linalg.inv(meas_cov + np.dot(A, np.dot(polyparams_cov, A.T)))
est_polyparams = polyparams_mean + np.dot(polyparams_cov, np.dot(A.T, np.dot( intTerm, (meas - np.dot(A, polyparams_mean)) ) ) )
error_polyparams = polyparams_cov - np.dot(polyparams_cov, np.dot(A.T, np.dot(intTerm, np.dot(A, polyparams_cov)) ) )
#print 'estimated polyparams'
#print est_polyparams
#print 'estimated error'
#print error_polyparams
p = np.poly1d(est_polyparams[::-1])
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
##################################################
#vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True, w=1/np.array(vstds))
#vmean_fit = vmean_fit_flipped[::-1]
#p = np.poly1d(vmean_fit)
#znews = np.linspace(np.min(z_position), np.max(z_position),100)
#pnews = p(znews)
#plt.plot(znews, pnews)
#plt.text(-1.4, 210., '[estimated $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0, z0_approxstdev), va='top', ha='left', color='black')
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
def focusing_matchfilter_lmt2017(scans, x0=0, y0=0, win=50., res=2., fwhm=11., channel='b', alpha_min=0., alpha_max=20., disk_diameter=0., z0search=20., alphasearch=20., plot=True):
all_scans = mfilt(scans)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(all_scans.x))))
zpos = []
xpos = []
ypos = []
meas_whitened = []
N_s = []
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1))
meas = scan.__dict__[channel]
N_s.append(len(scan.t))
maxN = np.max(N_s)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(maxN)))
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1))
# place the measurements into meas_pad so that its padded to be of a power 2 length
meas = scan.__dict__[channel]
# original sequence length
N = len(scan.t)
if scan_num == scans[0]:
whiteningfac = whiten_measurements(all_scans, pad, channel=channel)
meas_pad = np.zeros(pad)
meas_pad[:N] = meas
        # measurements of channel voltage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
meas_whitened.append(meas_rfft_conj_white)
zpos.append(scan.z[0])
xpos.append(scan.x)
ypos.append(scan.y)
z0_min = min(zpos)
z0_max = max(zpos)
z0s = np.linspace(z0_min, z0_max, z0search)
alphas = np.linspace(alpha_min, alpha_max,alphasearch)
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(x0-win, x0+win+res, res))
y = asec2rad(np.arange(y0-win, y0+win+res, res))
#(z0s_grid, alphas_grid, xx_grid, yy_grid) = np.meshgrid(z0s, alphas, x, y) # search grid
(z0s_grid, alphas_grid, xx_grid, yy_grid) = meshgrid_lmtscripts(z0s, alphas, x, y) # search grid
zr = z0s_grid.ravel()
ar = alphas_grid.ravel()
xr = xx_grid.ravel()
yr = yy_grid.ravel()
count = 0.
num_zs = len(zpos)
model_pad = np.zeros(pad)
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (ztest, atest, xtest, ytest) in zip(zr, ar, xr, yr):
#print count/len(zr)
if disk_diameter > 0:
models = focus_model_disk(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest, disk_diameter=disk_diameter, res=0.2)
else:
models = focus_model(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest)
snr = 0.0
norm = 0.0
for s in range(0,num_zs):
N = len(models[s])
# compute the ideal model in the time domain
model_pad[:N] = models[s]
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(model_pad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = norm + np.sum(np.abs(model_rfft_white)**2)
snr = snr + ( np.sum((model_rfft_white * meas_whitened[s]).real) )
norm = np.sqrt(norm)
norms.append(norm)
snrs.append(snr/norm)
count = count + 1.
    # compute probability and cumulative probabilities
isnr = np.argsort(np.array(snrs).ravel())[::-1] # reverse sort high to low
prob = np.exp((np.array(snrs).ravel()/np.sqrt(num_zs * pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(z0s_grid.shape) / np.sum(prob)
    # get the indices of the points on the map within 3 sigma and extract those z0s and probabilities
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
z0s_3sigma = z0s_grid.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the z0 within 3 sigma
z0_expvalue = np.sum(z0s_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
z0_squareddiff = (z0s_3sigma - z0_expvalue)**2
z0_variance = np.sqrt(np.sum(z0_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
imax = np.argmax(np.array(snrs).ravel())
(zmax, amax, xmax, ymax) = (zr.ravel()[imax], ar.ravel()[imax], xr.ravel()[imax], yr.ravel()[imax])
print 'estimated z0'
print zmax
if plot:
plt.figure()
plt.clf()
loc = np.unravel_index(imax, xx_grid.shape)
reshape_snr = np.array(snrs).reshape(z0s_grid.shape)
slice_snr = reshape_snr[:,:,loc[2],loc[3]]
plt.imshow(slice_snr, extent=(z0_min, z0_max, alpha_min, alpha_max), aspect=(z0_max-z0_min)/(alpha_max-alpha_min), interpolation='nearest', origin='lower', cmap='Spectral_r')
h1 = plt.contour(z0s_grid[:,:,loc[2],loc[3]], alphas_grid[:,:,loc[2],loc[3]], pcum[:,:,loc[2],loc[3]], scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
plt.plot(zmax, amax, 'y+', ms=11, mew=2)
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-(alphas[1]-alphas[0]), '[maximum $\mathbf{z}_0$: %.3f, x: %.3f, y: %.3f, alpha: %.3f]' % (zmax, rad2asec(xmax), rad2asec(ymax), amax), va='top', ha='left', color='black')
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-4*(alphas[1]-alphas[0]), '[expected $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0_expvalue, np.sqrt(z0_variance)), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}_0$')
plt.ylabel('alpha (FWHM in arcseconds per mm offset in $\mathbf{z}$)')
plt.gca().set_axis_bgcolor('white')
plt.tight_layout()
return
def whiten_measurements(z, pad_psd, channel='b'):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# compute the psd of the voltage measurements
(p, f) = psd(meas, NFFT=1024, pad_to=4096) # unit variance -> PSD = 1 = variance of complex FFT (1/sqrt(N))
# LINDY COMMENT: we will take out the 1/Hz normalization later, to get unit variance per complex data point
if 'fillfrac' in z:
        p = p / z.fillfrac # account for zeros in stitched timeseries (otherwise 1)
# sample frequencies for a sequence of length 'pad'. This should be equal to f...
freq_samples = np.abs(np.fft.fftfreq(pad_psd, d=1./2.)[:1+pad_psd/2]) # the default nyquist units
    # Compute the factor that whitens the data. This is 1 over the power spectral density.
# Each of the signals - the model and the measurements - should be whitened by the square root of this term
whiteningfac_squared = 1. / interp1d(f, p)(freq_samples) # compute 1/PSD at the locations of the measurements B. Really this shouldn't do anything...
whiteningfac_squared[freq_samples < 0.1 * (2./Fs)] = 0. # turn off low freqs below 0.1 Hz - just an arbitrary choice
whiteningfac = np.sqrt(whiteningfac_squared)
return whiteningfac
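# Note on whiten_measurements(): multiplying both the model and the data
# spectra by sqrt(1/PSD) makes the matched-filter cross term equivalent to
# dividing the cross-spectrum by the noise PSD, so the snr sums computed from
# these whitened spectra behave like noise-normalized detection statistics.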
def fitmodel_lmt2017(z, win=50., res=2., fwhm=11., channel='b', disk_diameter=0.):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# original sequence length
N = len(z.t)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(N)))
whiteningfac = whiten_measurements(z, pad, channel=channel)
# place the measurements into meas_pad so that its padded to be of a power 2 length
modelpad = np.zeros(pad)
meas_pad = np.zeros(pad)
    meas_pad[:N] = meas # LINDY COMMENT: fails if N = len(tp) ??
    # measurements of channel voltage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
xr = xx.ravel()
yr = yy.ravel()
count = 0;
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (xtest, ytest) in zip(xr, yr):
# compute the ideal model in the time domain
if disk_diameter>0:
modelpad[:N] = model_disk(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm, disk_diameter=disk_diameter, res=disk_diameter/8.)
else:
modelpad[:N] = model(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm) # model signal
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(modelpad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = np.sqrt(np.sum(np.abs(model_rfft_white)**2))
norms.append(norm)
snrs.append(np.sum((model_rfft_white * meas_rfft_conj_white).real) / norm)
count = count + 1
snr = np.array(snrs)
snr[snr < 0] = 0.
imax = np.argmax(snr) # maximum snr location
snr = snr.reshape(xx.shape)
isnr = np.argsort(snr.ravel())[::-1] # reverse sort high to low
prob = np.exp((snr.ravel()/np.sqrt(pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(xx.shape) / np.sum(prob)
xxa = xx * rad2asec(1.)
yya = yy * rad2asec(1.)
# m = model, b = measurements,
# Expected [ b_conj * (noise + amplitude*m) ]
# = Expected [b_conj*noise + b_conj*amplitude*m] = 0 + amplitude*b_conj*m
# Optimally, m = b. Therefore to get out the amplitude we would need to divide by
# b_conj*m = |model|^2 = norms^2
volts2milivolts = 1e3
voltage = volts2milivolts * snr/ np.array(norms).reshape(xx.shape)
return Namespace(xx=xxa, yy=yya, snr=snr/np.sqrt(pad/2.), v=voltage, prob=prob, pcum=pcum)
# linear detrend, use only edges
def detrend(x, ntaper=100):
x0 = np.mean(x[:ntaper])
x1 = np.mean(x[-ntaper:])
m = (x1 - x0) / len(x)
x2 = x - (x0 + m*np.arange(len(x)))
w = np.hanning(2 * ntaper)
x2[:ntaper] *= w[:ntaper]
x2[-ntaper:] *= w[-ntaper:]
return x2
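# detrend() removes the straight line fit through the scan edges and Hann-tapers
# the first/last ntaper samples, so the zero-padded FFTs used by the matched
# filter see no step discontinuities at scan boundaries.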
def model(x, y, x0=0, y0=0, fwhm=11.):
fwhm = asec2rad(fwhm)
sigma = fwhm / 2.335
# predicted counts
m = np.exp(-((x-x0)**2 + (y-y0)**2) / (2*sigma**2))
return m
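# Note: 2.335 above approximates the exact FWHM-to-sigma factor
# 2*sqrt(2*ln 2) ~= 2.3548; the ~1% difference is negligible at an 11" beam.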
def focus_model(xpos, ypos, zs, x0=0, y0=0, fwhm=11., z0=0, alpha=0):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
m_z = amplitude_z * np.exp(-((xpos[count]-x0)**2 + (ypos[count]-y0)**2) / (2*sigma_z**2))
models.append(m_z)
count = count + 1
return models
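# In focus_model() the beam sigma grows in quadrature with alpha*|z - z0| and
# the peak amplitude falls as 1/sigma_z**2, so the integrated 2-D flux of the
# Gaussian stays fixed as the telescope moves out of focus.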
def model_disk(xpos, ypos, x0=0, y0=0, fwhm=11., disk_diameter=0., res=2.):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
res_rad = asec2rad(res)
sigma_pixels = sigma/res_rad
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
x = (np.arange(x0-disk_radius-3*sigma, x0+disk_radius+3*sigma+res_rad, res_rad))
y = (np.arange(y0-disk_radius-3*sigma, y0+disk_radius+3*sigma+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0 )**2 + (yy_disk- y0)**2) <= disk_radius**2 ] = 1. #1./(np.pi*disk_radius**2)
#disk = disk/np.sum(np.sum(disk))
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_pixels, mode='constant', cval=0.0)
#blurred_disk = blurred_disk*( np.sqrt(2*np.pi) * (sigma)**2 )
#blurred_disk = blurred_disk/np.sum(np.sum(blurred_disk))
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
model = interpfunc(ypos, xpos, grid=False)
#plt.figure(); plt.plot(model)
#plt.figure()
#plt.axis(aspect=1.0)
#plt.imshow(blurred_disk, extent=(rad2asec(min(x)), rad2asec(max(x)), rad2asec(min(y)), rad2asec(max(y))), interpolation='nearest', origin='lower', cmap='afmhot_r')
#plt.plot(rad2asec(xpos), rad2asec(ypos), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
return model
def focus_model_disk(xpos, ypos, zs, x0=0, y0=0, fwhm=11., z0=0, alpha=0, disk_diameter=0., res=2.):
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
scaling = 10
res_rad = asec2rad(res)
x = (np.arange(x0-scaling*disk_radius, x0+scaling*disk_radius+res_rad, res_rad))
y = (np.arange(y0-scaling*disk_radius, y0+scaling*disk_radius+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0)**2 + (yy_disk-y0)**2) <= (disk_radius)**2 ] = 1.
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
sigma_z_pixels = sigma_z/asec2rad(res)
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_z_pixels, mode='constant', cval=0.0)
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
m_z = interpfunc(ypos[count], xpos[count], grid=False)
models.append(m_z)
count = count + 1
#plt.figure(); plt.imshow(blurred_disk)
return models
def gridPower(first, last=None, win=50., res=2., fwhm=11., channel='b', plot=True):
if last is None:
last = first
scans = range(first, last+1)
z = mfilt(scans)
meas = z.__dict__[channel]
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
gridded = scipy.interpolate.griddata(np.array([z.x[:-1], z.y[:-1]]).T , meas[:-1], (xx, yy), method='linear', fill_value=0.)
imax = np.argmax(gridded.ravel())
(xmax, ymax) = (xx.ravel()[imax], yy.ravel()[imax])
peakval = (gridded.ravel()[imax])
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(gridded, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (rad2asec(xmax), rad2asec(ymax)), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f mV]' % (peakval*1e3), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scan:' + str(scans[0]) + '-' + str(scans[-1]) )
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
return peakval, xmax, ymax
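# Hypothetical usage of gridPower() (scan number made up for illustration):
#   peakval, xmax, ymax = gridPower(57100, win=50., channel='b')
# grids the raw detector power directly, as a model-free cross-check on the
# matched-filter pointing solution.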
def focus_origMap(first, last=None, win=50., res=2., fwhm=11., channel='b', plot=True):
if last is None:
last = first
scans = range(first, last+1)
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan).nc.variables['Header.M2.ZReq'].data)
peakval, xmax, ymax = gridPower(scan, last=None, win=win, res=res, fwhm=fwhm, channel=channel, plot=plot)
vmeans.append(peakval)
plt.figure(); plt.plot(z_position, vmeans)
vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True)
vmean_fit = vmean_fit_flipped[::-1]
p = np.poly1d(vmean_fit)
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
return
| 36.997906 | 221 | 0.604251 |
import numpy
import matplotlib
import shutil
from matplotlib import pylab, mlab, pyplot
import os
np = numpy
plt = pyplot
from argparse import Namespace
from glob import glob
import scipy.io
import scipy.special
import scipy.ndimage
import scipy.interpolate
from scipy.signal import butter,lfilter,freqz
from scipy.interpolate import interp1d
from scipy.interpolate import UnivariateSpline
from matplotlib.mlab import griddata, psd
from datetime import datetime, timedelta
from scipy.optimize import fmin
pathname = '/data_lmt/vlbi1mm/vlbi1mm_*%06d*.nc'
def asec2rad(asec):
return asec * 2*np.pi / 3600. / 360.
def rad2asec(rad):
return rad * 3600. * 360. / (2*np.pi)
def focus(first, last, plot=False, point=False, win_pointing=5., win_focusing=5., res=2., fwhm=11., channel='b', z0search=20., alphasearch=20., disk_diameter=0.):
plt.close('all')
if point:
print 'pointing'
out = pointing_lmt2017(first, last=last, plot=plot, win=win_pointing, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
imax = np.argmax(out.snr.ravel())
(xmax, ymax) = (out.xx.ravel()[imax], out.yy.ravel()[imax])
else:
xmax = 0.
ymax = 0.
scans = range(first, last+1)
focus_subset(scans, x0=xmax, y0=ymax, plot=plot, win_pointing=win_pointing, win_focusing=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch, disk_diameter=disk_diameter)
def focus_subset(scans, x0=0., y0=0., plot=False, win_pointing=50., win_focusing=5., res=2., fwhm=11., channel='b', z0search=20., alphasearch=20., disk_diameter=0.):
focusing_parabolicfit_lmt2017(scans, plot=plot, win=win_pointing, channel=channel, disk_diameter=disk_diameter)
focusing_matchfilter_lmt2017(scans, x0=x0, y0=y0, win=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch)
def meshgrid_lmtscripts(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def extract(nc):
t0 = nc.variables['Data.Sky.Time'].data[0]
t = nc.variables['Data.Sky.Time'].data - t0
a = nc.variables['Data.Vlbi1mmTpm.APower'].data
b = nc.variables['Data.Vlbi1mmTpm.BPower'].data
x = nc.variables['Data.Sky.XPos'].data
y = nc.variables['Data.Sky.YPos'].data
i = ~nc.variables['Data.Dcs.BufPos'].data.astype(np.bool)
iobs = nc.variables['Header.Dcs.ObsNum'].data
    if iobs >= 39150:
        fs = 50.
tnew = nc.variables['Data.Vlbi1mmTpm.Time'].data - nc.variables['Data.Vlbi1mmTpm.Time'].data[0]
idx = tnew <= t[-1]
a = a[idx]
b = b[idx]
tnew = tnew[idx]
    elif iobs >= 38983:
        tnew = np.linspace(0, t[-1], len(t))
fs = 1./(t[1]-t[0])
adctime = nc.variables['Data.Vlbi1mmTpm.Time'].data - nc.variables['Data.Vlbi1mmTpm.Time'].data[0]
tnew = np.linspace(0, adctime[-1], len(adctime))
tnew = tnew[(tnew <= t[-1])]
a = interp1d(adctime, a)(tnew)
b = interp1d(adctime, b)(tnew)
    elif iobs >= 38915:
        fs = 1./0.012
        tnew = np.arange(0, t[-1] + 1e-6, 1./fs)
        a = interp1d(t, a)(tnew)
        b = interp1d(t, b)(tnew)
    else:
        fs = 10.
tnew = np.arange(0, t[-1] + 1e-6, .10)
a = interp1d(t, a)(tnew)
b = interp1d(t, b)(tnew)
x = interp1d(t, x)(tnew)
y = interp1d(t, y)(tnew)
i = interp1d(t, i)(tnew).astype(bool)
t = tnew
source = ''.join(nc.variables['Header.Source.SourceName'])
return Namespace(t0=t0, t=t, a=a, b=b, x=x, y=y, i=i, iobs=iobs, source=source, fs=fs)
def rawopen(iobs):
from scipy.io import netcdf
filename = glob(pathname % iobs)[-1]
nc = netcdf.netcdf_file(filename)
keep = Namespace()
keep.BufPos = nc.variables['Data.Dcs.BufPos'].data
keep.Time = nc.variables['Data.Sky.Time'].data
keep.XPos = nc.variables['Data.Sky.XPos'].data
keep.YPos = nc.variables['Data.Sky.YPos'].data
keep.APower = nc.variables['Data.Vlbi1mmTpm.APower'].data
keep.BPower = nc.variables['Data.Vlbi1mmTpm.BPower'].data
keep.nc = nc
if 'Data.Vlbi1mmTpm.Time' in nc.variables:
keep.ADCTime = nc.variables['Data.Vlbi1mmTpm.Time'].data
return keep
def mfilt(scans):
aps = []
bps = []
xs = []
ys = []
ts = []
ss = []
fss = []
zs = []
ntaper = 100
for i in sorted(scans):
keep = rawopen(i)
scan = extract(keep.nc)
aps.append(detrend(scan.a, ntaper=ntaper))
bps.append(detrend(scan.b, ntaper=ntaper))
ts.append(scan.t + scan.t0)
xs.append(scan.x)
ys.append(scan.y)
ss.append(scan.source)
fss.append(scan.fs)
zs.append(keep.nc.variables['Header.M2.ZReq'].data)
flag = 1
for s1 in range(0,len(ss)):
for s2 in range(s1,len(ss)):
if (ss[s1] != ss[s2]):
flag = 0
print('WARNING: NOT THE SAME SOURCE!!')
print ss
break
s = ss[0]
fs = fss[0]
t0 = ts[0][0]
t1 = ts[-1][-1]
tnew = np.arange(t0, t1+1./fs, 1./fs)
idx = np.zeros(len(tnew), dtype=np.bool)
x = np.zeros(len(tnew))
y = np.zeros(len(tnew))
a = np.zeros(len(tnew))
b = np.zeros(len(tnew))
for i in range(len(ts)):
        istart = int(np.round((ts[i][0] - t0) * fs))
idx[istart:istart+len(ts[i])] = True
x[istart:istart+len(xs[i])] = xs[i][:len(x)-istart]
y[istart:istart+len(ys[i])] = ys[i][:len(y)-istart]
a[istart:istart+len(aps[i])] = aps[i][:len(a)-istart]
b[istart:istart+len(bps[i])] = bps[i][:len(b)-istart]
x[~idx] = np.inf
y[~idx] = np.inf
fillfrac = float(np.sum(idx)-ntaper*len(scans)) / len(tnew)
return Namespace(t=tnew, a=a, b=b, x=x, y=y, z=zs, idx=idx, source=s, fs=fs, fillfrac=fillfrac)
def pointing_lmt2017(first, last=None, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
if last is None:
last = first
scans = range(first, last+1)
out = pointing_lmt2017_wrapper(scans, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
return out
def pointing_lmt2017_wrapper(scans, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
z = mfilt(scans)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(z.x))))
out = fitmodel_lmt2017(z, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, pcum) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma)
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma))
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(v, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
h1 = plt.contour(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
imax = np.argmax(snr.ravel())
(xmax, ymax) = (xxa.ravel()[imax], yya.ravel()[imax])
plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (xmax, ymax), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f $\pm$ %.1f mV]' % (sourcevoltage_expvalue, sourcevoltage_stdev), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scans:' + str(scans[0]) + '-' + str(scans[-1]))
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
############## return #############
return out
def focusing_parabolicfit_lmt2017(scans, plot=True, win=10., res=0.5, fwhm=11., channel='b', disk_diameter=0.):
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan).nc.variables['Header.M2.ZReq'].data)
out = pointing_lmt2017(scan, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, cumulative_prob) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
# KATIE: DOESN'T THIS ONLY WORK IF YOU ONLY HAVE 1 PEAK???
        # get the indices of the points on the map within 3 sigma and extract those voltages and probabilities
indices_3sigma = (cumulative_prob.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the source voltage within 3 sigma
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
vmeans.append(sourcevoltage_expvalue)
vstds.append(sourcevoltage_stdev)
plt.figure(); plt.errorbar(z_position, vmeans, yerr=vstds)
############ LEAST SQUARES FITTING ################
A = np.vstack([np.ones([1, len(z_position)]), np.array(z_position), np.array(z_position)**2]).T
meas = np.array(vmeans)
meas_cov = np.diag(np.array(vstds)**2)
polydeg = 2
scale = 1e5
polyparams_cov = scale*np.eye(polydeg+1)
polyparams_mean = np.zeros([polydeg+1])
intTerm = np.linalg.inv(meas_cov + np.dot(A, np.dot(polyparams_cov, A.T)))
est_polyparams = polyparams_mean + np.dot(polyparams_cov, np.dot(A.T, np.dot( intTerm, (meas - np.dot(A, polyparams_mean)) ) ) )
error_polyparams = polyparams_cov - np.dot(polyparams_cov, np.dot(A.T, np.dot(intTerm, np.dot(A, polyparams_cov)) ) )
#print 'estimated polyparams'
#print est_polyparams
#print 'estimated error'
#print error_polyparams
p = np.poly1d(est_polyparams[::-1])
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
##################################################
#vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True, w=1/np.array(vstds))
#vmean_fit = vmean_fit_flipped[::-1]
#p = np.poly1d(vmean_fit)
#znews = np.linspace(np.min(z_position), np.max(z_position),100)
#pnews = p(znews)
#plt.plot(znews, pnews)
#plt.text(-1.4, 210., '[estimated $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0, z0_approxstdev), va='top', ha='left', color='black')
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
def focusing_matchfilter_lmt2017(scans, x0=0, y0=0, win=50., res=2., fwhm=11., channel='b', alpha_min=0., alpha_max=20., disk_diameter=0., z0search=20., alphasearch=20., plot=True):
all_scans = mfilt(scans)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(all_scans.x))))
zpos = []
xpos = []
ypos = []
meas_whitened = []
N_s = []
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1))
meas = scan.__dict__[channel]
N_s.append(len(scan.t))
maxN = np.max(N_s)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(maxN)))
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1))
# place the measurements into meas_pad so that its padded to be of a power 2 length
meas = scan.__dict__[channel]
# original sequence length
N = len(scan.t)
if scan_num == scans[0]:
whiteningfac = whiten_measurements(all_scans, pad, channel=channel)
meas_pad = np.zeros(pad)
meas_pad[:N] = meas
        # measurements of channel voltage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
meas_whitened.append(meas_rfft_conj_white)
zpos.append(scan.z[0])
xpos.append(scan.x)
ypos.append(scan.y)
z0_min = min(zpos)
z0_max = max(zpos)
z0s = np.linspace(z0_min, z0_max, z0search)
alphas = np.linspace(alpha_min, alpha_max,alphasearch)
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(x0-win, x0+win+res, res))
y = asec2rad(np.arange(y0-win, y0+win+res, res))
#(z0s_grid, alphas_grid, xx_grid, yy_grid) = np.meshgrid(z0s, alphas, x, y) # search grid
(z0s_grid, alphas_grid, xx_grid, yy_grid) = meshgrid_lmtscripts(z0s, alphas, x, y) # search grid
zr = z0s_grid.ravel()
ar = alphas_grid.ravel()
xr = xx_grid.ravel()
yr = yy_grid.ravel()
count = 0.
num_zs = len(zpos)
model_pad = np.zeros(pad)
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (ztest, atest, xtest, ytest) in zip(zr, ar, xr, yr):
#print count/len(zr)
if disk_diameter > 0:
models = focus_model_disk(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest, disk_diameter=disk_diameter, res=0.2)
else:
models = focus_model(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest)
snr = 0.0
norm = 0.0
for s in range(0,num_zs):
N = len(models[s])
# compute the ideal model in the time domain
model_pad[:N] = models[s]
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(model_pad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = norm + np.sum(np.abs(model_rfft_white)**2)
snr = snr + ( np.sum((model_rfft_white * meas_whitened[s]).real) )
norm = np.sqrt(norm)
norms.append(norm)
snrs.append(snr/norm)
count = count + 1.
    # compute probability and cumulative probabilities
isnr = np.argsort(np.array(snrs).ravel())[::-1] # reverse sort high to low
prob = np.exp((np.array(snrs).ravel()/np.sqrt(num_zs * pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(z0s_grid.shape) / np.sum(prob)
    # get the indices of the points on the map within 3 sigma and extract those z0s and probabilities
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
z0s_3sigma = z0s_grid.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the z0 within 3 sigma
z0_expvalue = np.sum(z0s_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
z0_squareddiff = (z0s_3sigma - z0_expvalue)**2
z0_variance = np.sqrt(np.sum(z0_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
imax = np.argmax(np.array(snrs).ravel())
(zmax, amax, xmax, ymax) = (zr.ravel()[imax], ar.ravel()[imax], xr.ravel()[imax], yr.ravel()[imax])
print 'estimated z0'
print zmax
if plot:
plt.figure()
plt.clf()
loc = np.unravel_index(imax, xx_grid.shape)
reshape_snr = np.array(snrs).reshape(z0s_grid.shape)
slice_snr = reshape_snr[:,:,loc[2],loc[3]]
plt.imshow(slice_snr, extent=(z0_min, z0_max, alpha_min, alpha_max), aspect=(z0_max-z0_min)/(alpha_max-alpha_min), interpolation='nearest', origin='lower', cmap='Spectral_r')
h1 = plt.contour(z0s_grid[:,:,loc[2],loc[3]], alphas_grid[:,:,loc[2],loc[3]], pcum[:,:,loc[2],loc[3]], scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
plt.plot(zmax, amax, 'y+', ms=11, mew=2)
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-(alphas[1]-alphas[0]), '[maximum $\mathbf{z}_0$: %.3f, x: %.3f, y: %.3f, alpha: %.3f]' % (zmax, rad2asec(xmax), rad2asec(ymax), amax), va='top', ha='left', color='black')
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-4*(alphas[1]-alphas[0]), '[expected $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0_expvalue, np.sqrt(z0_variance)), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}_0$')
plt.ylabel('alpha (FWHM in arcseconds per mm offset in $\mathbf{z}$)')
plt.gca().set_axis_bgcolor('white')
plt.tight_layout()
return
def whiten_measurements(z, pad_psd, channel='b'):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# compute the psd of the voltage measurements
(p, f) = psd(meas, NFFT=1024, pad_to=4096) # unit variance -> PSD = 1 = variance of complex FFT (1/sqrt(N))
# LINDY COMMENT: we will take out the 1/Hz normalization later, to get unit variance per complex data point
if 'fillfrac' in z:
        p = p / z.fillfrac # account for zeros in stitched timeseries (otherwise 1)
# sample frequencies for a sequence of length 'pad'. This should be equal to f...
freq_samples = np.abs(np.fft.fftfreq(pad_psd, d=1./2.)[:1+pad_psd/2]) # the default nyquist units
    # Compute the factor that whitens the data. This is 1 over the power spectral density.
# Each of the signals - the model and the measurements - should be whitened by the square root of this term
whiteningfac_squared = 1. / interp1d(f, p)(freq_samples) # compute 1/PSD at the locations of the measurements B. Really this shouldn't do anything...
whiteningfac_squared[freq_samples < 0.1 * (2./Fs)] = 0. # turn off low freqs below 0.1 Hz - just an arbitrary choice
whiteningfac = np.sqrt(whiteningfac_squared)
return whiteningfac
def fitmodel_lmt2017(z, win=50., res=2., fwhm=11., channel='b', disk_diameter=0.):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# original sequence length
N = len(z.t)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(N)))
whiteningfac = whiten_measurements(z, pad, channel=channel)
# place the measurements into meas_pad so that its padded to be of a power 2 length
modelpad = np.zeros(pad)
meas_pad = np.zeros(pad)
    meas_pad[:N] = meas # LINDY COMMENT: fails if N = len(tp) ??
    # measurements of channel voltage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
xr = xx.ravel()
yr = yy.ravel()
count = 0;
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (xtest, ytest) in zip(xr, yr):
# compute the ideal model in the time domain
if disk_diameter>0:
modelpad[:N] = model_disk(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm, disk_diameter=disk_diameter, res=disk_diameter/8.)
else:
modelpad[:N] = model(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm) # model signal
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(modelpad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = np.sqrt(np.sum(np.abs(model_rfft_white)**2))
norms.append(norm)
snrs.append(np.sum((model_rfft_white * meas_rfft_conj_white).real) / norm)
count = count + 1
snr = np.array(snrs)
snr[snr < 0] = 0.
imax = np.argmax(snr) # maximum snr location
snr = snr.reshape(xx.shape)
isnr = np.argsort(snr.ravel())[::-1] # reverse sort high to low
prob = np.exp((snr.ravel()/np.sqrt(pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(xx.shape) / np.sum(prob)
xxa = xx * rad2asec(1.)
yya = yy * rad2asec(1.)
# m = model, b = measurements,
# Expected [ b_conj * (noise + amplitude*m) ]
# = Expected [b_conj*noise + b_conj*amplitude*m] = 0 + amplitude*b_conj*m
# Optimally, m = b. Therefore to get out the amplitude we would need to divide by
# b_conj*m = |model|^2 = norms^2
volts2milivolts = 1e3
voltage = volts2milivolts * snr/ np.array(norms).reshape(xx.shape)
return Namespace(xx=xxa, yy=yya, snr=snr/np.sqrt(pad/2.), v=voltage, prob=prob, pcum=pcum)
# linear detrend, use only edges
def detrend(x, ntaper=100):
x0 = np.mean(x[:ntaper])
x1 = np.mean(x[-ntaper:])
m = (x1 - x0) / len(x)
x2 = x - (x0 + m*np.arange(len(x)))
w = np.hanning(2 * ntaper)
x2[:ntaper] *= w[:ntaper]
x2[-ntaper:] *= w[-ntaper:]
return x2
def model(x, y, x0=0, y0=0, fwhm=11.):
fwhm = asec2rad(fwhm)
sigma = fwhm / 2.335
# predicted counts
m = np.exp(-((x-x0)**2 + (y-y0)**2) / (2*sigma**2))
return m
def focus_model(xpos, ypos, zs, x0=0, y0=0, fwhm=11., z0=0, alpha=0):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
m_z = amplitude_z * np.exp(-((xpos[count]-x0)**2 + (ypos[count]-y0)**2) / (2*sigma_z**2))
models.append(m_z)
count = count + 1
return models
def model_disk(xpos, ypos, x0=0, y0=0, fwhm=11., disk_diameter=0., res=2.):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
res_rad = asec2rad(res)
sigma_pixels = sigma/res_rad
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
x = (np.arange(x0-disk_radius-3*sigma, x0+disk_radius+3*sigma+res_rad, res_rad))
y = (np.arange(y0-disk_radius-3*sigma, y0+disk_radius+3*sigma+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0 )**2 + (yy_disk- y0)**2) <= disk_radius**2 ] = 1. #1./(np.pi*disk_radius**2)
#disk = disk/np.sum(np.sum(disk))
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_pixels, mode='constant', cval=0.0)
#blurred_disk = blurred_disk*( np.sqrt(2*np.pi) * (sigma)**2 )
#blurred_disk = blurred_disk/np.sum(np.sum(blurred_disk))
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
model = interpfunc(ypos, xpos, grid=False)
#plt.figure(); plt.plot(model)
#plt.figure()
#plt.axis(aspect=1.0)
#plt.imshow(blurred_disk, extent=(rad2asec(min(x)), rad2asec(max(x)), rad2asec(min(y)), rad2asec(max(y))), interpolation='nearest', origin='lower', cmap='afmhot_r')
#plt.plot(rad2asec(xpos), rad2asec(ypos), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
return model
def focus_model_disk(xpos, ypos, zs, x0=0, y0=0, fwhm=11., z0=0, alpha=0, disk_diameter=0., res=2.):
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
scaling = 10
res_rad = asec2rad(res)
x = (np.arange(x0-scaling*disk_radius, x0+scaling*disk_radius+res_rad, res_rad))
y = (np.arange(y0-scaling*disk_radius, y0+scaling*disk_radius+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0)**2 + (yy_disk-y0)**2) <= (disk_radius)**2 ] = 1.
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
sigma_z_pixels = sigma_z/asec2rad(res)
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_z_pixels, mode='constant', cval=0.0)
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
m_z = interpfunc(ypos[count], xpos[count], grid=False)
models.append(m_z)
count = count + 1
#plt.figure(); plt.imshow(blurred_disk)
return models
def gridPower(first, last=None, win=50., res=2., fwhm=11., channel='b', plot=True):
if last is None:
last = first
scans = range(first, last+1)
z = mfilt(scans)
meas = z.__dict__[channel]
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
gridded = scipy.interpolate.griddata(np.array([z.x[:-1], z.y[:-1]]).T , meas[:-1], (xx, yy), method='linear', fill_value=0.)
imax = np.argmax(gridded.ravel())
(xmax, ymax) = (xx.ravel()[imax], yy.ravel()[imax])
peakval = (gridded.ravel()[imax])
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(gridded, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (rad2asec(xmax), rad2asec(ymax)), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f mV]' % (peakval*1e3), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scan:' + str(scans[0]) + '-' + str(scans[-1]) )
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
return peakval, xmax, ymax
def focus_origMap(first, last=None, win=50., res=2., fwhm=11., channel='b', plot=True):
if last is None:
last = first
scans = range(first, last+1)
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan).nc.variables['Header.M2.ZReq'].data)
peakval, xmax, ymax = gridPower(scan, last=None, win=win, res=res, fwhm=fwhm, channel=channel, plot=plot)
vmeans.append(peakval)
plt.figure(); plt.plot(z_position, vmeans)
vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True)
vmean_fit = vmean_fit_flipped[::-1]
p = np.poly1d(vmean_fit)
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
return
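# Added note (illustration only): focus_origMap localises the best focus by
# argmax over a dense grid of the fitted quadratic; for a fit
# p(z) = a*z**2 + b*z + c the vertex is also available in closed form.
def _parabola_vertex(coeffs_descending):
    a, b, _c = coeffs_descending   # same ordering as vmean_fit above
    return -b / (2.0 * a)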
| false | true |
1c499fc2f86c8a8a004cb58ecc8b62a1fa49790d | 485 | py | Python | parsl/tests/low_latency/utils.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 323 | 2017-07-28T21:31:27.000Z | 2022-03-05T13:06:05.000Z | parsl/tests/low_latency/utils.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 1,286 | 2017-06-01T16:50:00.000Z | 2022-03-31T16:45:14.000Z | parsl/tests/low_latency/utils.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 113 | 2017-06-03T11:38:40.000Z | 2022-03-26T16:43:05.000Z | import subprocess
def ping_time(ip, n=5):
"""
Returns the average ping time in microseconds.
Note: This function is inherently platform specific.
It currently works on Midway.
"""
cmd = "ping {} -c {}".format(ip, n)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
output = str(p.communicate()[0])
stats = output.split("\n")[-1].split(" = ")[-1].split("/")
avg_ping_time = float(stats[1]) # In ms
return avg_ping_time * 1000
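# Hedged usage sketch (added; not part of the original module). The loopback
# address is a placeholder target; per the docstring the result is in
# microseconds, so ~1 ms of round-trip time comes back as ~1000.0.
def _example_ping_time():
    return ping_time("127.0.0.1", n=2)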
| 28.529412 | 64 | 0.62268 | import subprocess
def ping_time(ip, n=5):
cmd = "ping {} -c {}".format(ip, n)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
output = str(p.communicate()[0])
stats = output.split("\n")[-1].split(" = ")[-1].split("/")
    avg_ping_time = float(stats[1])
    return avg_ping_time * 1000
| true | true |
1c49a038547d3f33fd2423eef8a4c1f27b6e6093 | 2,699 | py | Python | src/remotedb/triggers.py | garyhsiao/pycon.tw | 40e54b6aaf12b837d5536bc980c6963e05d6d3cd | [
"MIT"
] | 1 | 2019-04-04T12:17:26.000Z | 2019-04-04T12:17:26.000Z | src/remotedb/triggers.py | garyhsiao/pycon.tw | 40e54b6aaf12b837d5536bc980c6963e05d6d3cd | [
"MIT"
] | null | null | null | src/remotedb/triggers.py | garyhsiao/pycon.tw | 40e54b6aaf12b837d5536bc980c6963e05d6d3cd | [
"MIT"
] | 2 | 2019-04-04T12:18:04.000Z | 2019-04-14T13:57:57.000Z | import functools
import itertools
import logging
from firebase.firebase import FirebaseApplication
from django.conf import settings
from django_q import tasks
from events.models import (
CustomEvent, KeynoteEvent, ProposedTalkEvent, SponsoredEvent,
)
from proposals.models import TalkProposal
from . import dumpers
logger = logging.getLogger(__name__)
def mock_on_debug(f):
"""Mock out decorated function when `settings.DEBUG` is True.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
if settings.DJANGO_Q_DEBUG:
logger.info('Called {0} with args={args!r}, kwargs={kw!r}'.format(
f.__name__, args=args, kw=kwargs,
))
return
return f(*args, **kwargs)
return wrapped
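# Illustrative demo (added; not in the original module): any callable wrapped
# with @mock_on_debug becomes a logged no-op while settings.DJANGO_Q_DEBUG is
# truthy, which is how the sync_* helpers below stay silent in debug runs.
@mock_on_debug
def _example_mocked_task():
    return 'ran'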
def update_firebase(path, key, data):
app = FirebaseApplication(settings.FIREBASE_URL)
app.put('{}/{}'.format(settings.FIREBASE_DB, path), key, data)
def _sync_proposal_detail(proposal):
detail_data = dumpers.dump_proposal(proposal)
update_firebase('events', str(proposal.pk), detail_data)
@mock_on_debug
def sync_proposal_detail(proposal):
tasks.async(_sync_proposal_detail, proposal)
def _sync_sponsored_talk_detail(event):
detail_data = dumpers.dump_sponsored_event_detail(event)
update_firebase('events', 'sponsored_{}'.format(event.pk), detail_data)
@mock_on_debug
def sync_sponsored_talk_detail(event):
tasks.async(_sync_sponsored_talk_detail, event)
def _sync_schedule():
custeom_event_qs = (
CustomEvent.objects
.select_related('begin_time', 'end_time')
)
keynote_event_qs = (
KeynoteEvent.objects
.select_related('begin_time', 'end_time')
)
proposed_talk_event_qs = (
ProposedTalkEvent.objects
.select_related('begin_time', 'end_time', 'proposal__submitter')
)
sponsored_event_qs = (
SponsoredEvent.objects
.select_related('begin_time', 'end_time', 'host')
)
schedule_data = dumpers.dump_schedule(itertools.chain(
custeom_event_qs,
keynote_event_qs,
proposed_talk_event_qs,
sponsored_event_qs,
))
for key, value in schedule_data.items():
schedule_date_data = {'date': key, 'slots': value}
update_firebase('schedule', key, schedule_date_data)
@mock_on_debug
def sync_schedule():
tasks.async(_sync_schedule)
def _sync_user_events(user):
for e in SponsoredEvent.objects.filter(host=user):
_sync_sponsored_talk_detail(e)
for p in TalkProposal.objects.filter_viewable(user=user):
_sync_proposal_detail(p)
@mock_on_debug
def sync_user_events(user):
tasks.async(_sync_user_events, user)
| 25.704762 | 78 | 0.705076 | import functools
import itertools
import logging
from firebase.firebase import FirebaseApplication
from django.conf import settings
from django_q import tasks
from events.models import (
CustomEvent, KeynoteEvent, ProposedTalkEvent, SponsoredEvent,
)
from proposals.models import TalkProposal
from . import dumpers
logger = logging.getLogger(__name__)
def mock_on_debug(f):
"""Mock out decorated function when `settings.DEBUG` is True.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
if settings.DJANGO_Q_DEBUG:
logger.info('Called {0} with args={args!r}, kwargs={kw!r}'.format(
f.__name__, args=args, kw=kwargs,
))
return
return f(*args, **kwargs)
return wrapped
def update_firebase(path, key, data):
app = FirebaseApplication(settings.FIREBASE_URL)
app.put('{}/{}'.format(settings.FIREBASE_DB, path), key, data)
def _sync_proposal_detail(proposal):
detail_data = dumpers.dump_proposal(proposal)
update_firebase('events', str(proposal.pk), detail_data)
@mock_on_debug
def sync_proposal_detail(proposal):
tasks.async(_sync_proposal_detail, proposal)
def _sync_sponsored_talk_detail(event):
detail_data = dumpers.dump_sponsored_event_detail(event)
update_firebase('events', 'sponsored_{}'.format(event.pk), detail_data)
@mock_on_debug
def sync_sponsored_talk_detail(event):
tasks.async(_sync_sponsored_talk_detail, event)
def _sync_schedule():
custeom_event_qs = (
CustomEvent.objects
.select_related('begin_time', 'end_time')
)
keynote_event_qs = (
KeynoteEvent.objects
.select_related('begin_time', 'end_time')
)
proposed_talk_event_qs = (
ProposedTalkEvent.objects
.select_related('begin_time', 'end_time', 'proposal__submitter')
)
sponsored_event_qs = (
SponsoredEvent.objects
.select_related('begin_time', 'end_time', 'host')
)
schedule_data = dumpers.dump_schedule(itertools.chain(
custeom_event_qs,
keynote_event_qs,
proposed_talk_event_qs,
sponsored_event_qs,
))
for key, value in schedule_data.items():
schedule_date_data = {'date': key, 'slots': value}
update_firebase('schedule', key, schedule_date_data)
@mock_on_debug
def sync_schedule():
tasks.async(_sync_schedule)
def _sync_user_events(user):
for e in SponsoredEvent.objects.filter(host=user):
_sync_sponsored_talk_detail(e)
for p in TalkProposal.objects.filter_viewable(user=user):
_sync_proposal_detail(p)
@mock_on_debug
def sync_user_events(user):
tasks.async(_sync_user_events, user)
| false | true |
1c49a06cb0153ff1b55abd18a79af8ed0e7294aa | 3,625 | py | Python | readless/Summarization/clusterrank.py | Santhoshkumard11/senpai | f517aba8f2b442714811bd7748b95ee6e5473820 | [
"MIT"
] | 59 | 2016-11-16T13:41:09.000Z | 2022-01-26T01:56:38.000Z | readless/Summarization/clusterrank.py | AndiChiou/senpai | f517aba8f2b442714811bd7748b95ee6e5473820 | [
"MIT"
] | null | null | null | readless/Summarization/clusterrank.py | AndiChiou/senpai | f517aba8f2b442714811bd7748b95ee6e5473820 | [
"MIT"
] | 13 | 2016-11-15T13:09:50.000Z | 2021-03-13T11:04:45.000Z | #!/usr/bin/python
# *****************************************************************************
#
# Author: Aditya Chatterjee
#
# Interweb/ contacts: GitHub.com/AdiChat
# Email: [email protected]
#
# Implementation of the ClusterRank Algorithm
#
# MIT License
#
# To keep up with the latest version, consult repository: GitHub.com/AdiChat/Read-Less
#
# To get an overview of the ClusterRank Algorithm, consult wiki: Github.com/AdiChat/Read-Less/wiki/ClusterRank
#
# *****************************************************************************
import io
import nltk
import itertools
from operator import itemgetter
import networkx as nx
import os
import texttiling
import parse
class ClusterRank():
def __init__(self):
print "Cluster Rank"
def lDistance(self, firstString, secondString):
'''
Finds the levenshtein distance between 2 strings
Arguments:
firstString: first input string
secondString: second input string
Returns:
the levenshtein distance between the two input strings
Raises:
None
'''
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1+1], newDistances[-1])))
distances = newDistances
return distances[-1]
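  # Worked example (added for illustration): the classic pair gives
  #   ClusterRank().lDistance("kitten", "sitting") == 3
  # (substitute k->s, substitute e->i, append g).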
def buildGraph(self, nodes):
'''
Builds the graph with a token of words as a node
Arguments:
nodes: list of nodes/ token of words
Returns:
the graph
Raises:
None
'''
gr = nx.Graph()
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = self.lDistance(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extractSentences(self, text):
'''
Extracts sentences from the graph using pagerank
Arguments:
text: input textual data
Returns:
summary: a bunch of sentences
Raises:
None
'''
sentenceTokens = text
print "Building graph"
graph = self.buildGraph(sentenceTokens)
print "Computing page rank"
calculated_page_rank = nx.pagerank(graph, weight='weight')
  # most important sentences first: sorted in descending order of PageRank score
print "Assigning score to sentences"
sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
  # return a summary capped at roughly 200 words (the [0:201] slice below)
print "Generating summary"
summary = ' '.join(sentences)
summaryWords = summary.split()
summaryWords = summaryWords[0:201]
summary = ' '.join(summaryWords)
print "Operation completed"
return summary
def summarize(self, data):
'''
Summarizes a text data
Arguments:
data: input textual data
Returns:
The summary of input file
Raises:
None
'''
t = texttiling.TextTiling()
text = t.run(data)
return self.extractSentences(text)
def summarizeFile(self, pathToFile):
'''
Summarizes a document
Arguments:
pathToFile: path to the file to be summarized
Returns:
The summary of the input file
Raises:
None
'''
p = parse.Parse()
t = texttiling.TextTiling()
data = p.dataFromFile(pathToFile)
text = t.run(data)
return self.extractSentences(text) | 26.459854 | 110 | 0.656 | import io
import nltk
import itertools
from operator import itemgetter
import networkx as nx
import os
import texttiling
import parse
class ClusterRank():
def __init__(self):
print "Cluster Rank"
def lDistance(self, firstString, secondString):
'''
Finds the levenshtein distance between 2 strings
Arguments:
firstString: first input string
secondString: second input string
Returns:
the levenshtein distance between the two input strings
Raises:
None
'''
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1+1], newDistances[-1])))
distances = newDistances
return distances[-1]
def buildGraph(self, nodes):
'''
Builds the graph with a token of words as a node
Arguments:
nodes: list of nodes/ token of words
Returns:
the graph
Raises:
None
'''
gr = nx.Graph()
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = self.lDistance(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extractSentences(self, text):
'''
Extracts sentences from the graph using pagerank
Arguments:
text: input textual data
Returns:
summary: a bunch of sentences
Raises:
None
'''
sentenceTokens = text
print "Building graph"
graph = self.buildGraph(sentenceTokens)
print "Computing page rank"
calculated_page_rank = nx.pagerank(graph, weight='weight')
print "Assigning score to sentences"
sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
print "Generating summary"
summary = ' '.join(sentences)
summaryWords = summary.split()
summaryWords = summaryWords[0:201]
summary = ' '.join(summaryWords)
print "Operation completed"
return summary
def summarize(self, data):
'''
Summarizes a text data
Arguments:
data: input textual data
Returns:
The summary of input file
Raises:
None
'''
t = texttiling.TextTiling()
text = t.run(data)
return self.extractSentences(text)
def summarizeFile(self, pathToFile):
'''
Summarizes a document
Arguments:
pathToFile: path to the file to be summarized
Returns:
The summary of the input file
Raises:
None
'''
p = parse.Parse()
t = texttiling.TextTiling()
data = p.dataFromFile(pathToFile)
text = t.run(data)
return self.extractSentences(text) | false | true |
1c49a0de93121462a77b6c4abf13849b5433cd52 | 4,847 | py | Python | chapter07/detect_car_bow_svm_sliding_window.py | insoo223/openCVhowse | d8885ab4f87a9d577fd660e60d41222dc2156332 | [
"BSD-3-Clause"
] | 286 | 2019-06-29T11:47:40.000Z | 2022-03-29T08:41:28.000Z | chapter07/detect_car_bow_svm_sliding_window.py | insoo223/openCVhowse | d8885ab4f87a9d577fd660e60d41222dc2156332 | [
"BSD-3-Clause"
] | 8 | 2020-10-01T17:48:04.000Z | 2022-03-26T04:27:06.000Z | chapter07/detect_car_bow_svm_sliding_window.py | insoo223/openCVhowse | d8885ab4f87a9d577fd660e60d41222dc2156332 | [
"BSD-3-Clause"
] | 153 | 2019-07-01T02:53:02.000Z | 2022-03-28T08:43:44.000Z | import cv2
import numpy as np
import os
from non_max_suppression import non_max_suppression_fast as nms
if not os.path.isdir('CarData'):
print('CarData folder not found. Please download and unzip '
'http://l2r.cs.uiuc.edu/~cogcomp/Data/Car/CarData.tar.gz '
'or https://github.com/gcr/arc-evaluator/raw/master/CarData.tar.gz '
'into the same folder as this script.')
exit(1)
BOW_NUM_TRAINING_SAMPLES_PER_CLASS = 10
SVM_NUM_TRAINING_SAMPLES_PER_CLASS = 100
SVM_SCORE_THRESHOLD = 1.8
NMS_OVERLAP_THRESHOLD = 0.15
sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = {}
flann = cv2.FlannBasedMatcher(index_params, search_params)
bow_kmeans_trainer = cv2.BOWKMeansTrainer(12)
bow_extractor = cv2.BOWImgDescriptorExtractor(sift, flann)
def get_pos_and_neg_paths(i):
pos_path = 'CarData/TrainImages/pos-%d.pgm' % (i+1)
neg_path = 'CarData/TrainImages/neg-%d.pgm' % (i+1)
return pos_path, neg_path
def add_sample(path):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
keypoints, descriptors = sift.detectAndCompute(img, None)
if descriptors is not None:
bow_kmeans_trainer.add(descriptors)
for i in range(BOW_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
add_sample(pos_path)
add_sample(neg_path)
voc = bow_kmeans_trainer.cluster()
bow_extractor.setVocabulary(voc)
def extract_bow_descriptors(img):
features = sift.detect(img)
return bow_extractor.compute(img, features)
training_data = []
training_labels = []
for i in range(SVM_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
pos_img = cv2.imread(pos_path, cv2.IMREAD_GRAYSCALE)
pos_descriptors = extract_bow_descriptors(pos_img)
if pos_descriptors is not None:
training_data.extend(pos_descriptors)
training_labels.append(1)
neg_img = cv2.imread(neg_path, cv2.IMREAD_GRAYSCALE)
neg_descriptors = extract_bow_descriptors(neg_img)
if neg_descriptors is not None:
training_data.extend(neg_descriptors)
training_labels.append(-1)
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(50)
svm.train(np.array(training_data), cv2.ml.ROW_SAMPLE,
np.array(training_labels))
def pyramid(img, scale_factor=1.25, min_size=(200, 80),
max_size=(600, 600)):
h, w = img.shape
min_w, min_h = min_size
max_w, max_h = max_size
while w >= min_w and h >= min_h:
if w <= max_w and h <= max_h:
yield img
w /= scale_factor
h /= scale_factor
img = cv2.resize(img, (int(w), int(h)),
interpolation=cv2.INTER_AREA)
def sliding_window(img, step=20, window_size=(100, 40)):
img_h, img_w = img.shape
window_w, window_h = window_size
    for y in range(0, img_h, step):  # rows: y spans the image height
        for x in range(0, img_w, step):  # cols: x spans the image width
roi = img[y:y+window_h, x:x+window_w]
roi_h, roi_w = roi.shape
if roi_w == window_w and roi_h == window_h:
yield (x, y, roi)
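# Added note (illustration only): pyramid() and sliding_window() compose into
# the standard multi-scale scan used by the detection loop below, e.g.
#   rois = [roi for scaled in pyramid(gray_img)
#           for (x, y, roi) in sliding_window(scaled)]
# Each ROI is then BoW-encoded and scored by the SVM.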
for test_img_path in ['CarData/TestImages/test-0.pgm',
'CarData/TestImages/test-1.pgm',
'../images/car.jpg',
'../images/haying.jpg',
'../images/statue.jpg',
'../images/woodcutters.jpg']:
img = cv2.imread(test_img_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pos_rects = []
for resized in pyramid(gray_img):
for x, y, roi in sliding_window(resized):
descriptors = extract_bow_descriptors(roi)
if descriptors is None:
continue
prediction = svm.predict(descriptors)
if prediction[1][0][0] == 1.0:
raw_prediction = svm.predict(
descriptors, flags=cv2.ml.STAT_MODEL_RAW_OUTPUT)
score = -raw_prediction[1][0][0]
if score > SVM_SCORE_THRESHOLD:
h, w = roi.shape
scale = gray_img.shape[0] / float(resized.shape[0])
pos_rects.append([int(x * scale),
int(y * scale),
int((x+w) * scale),
int((y+h) * scale),
score])
pos_rects = nms(np.array(pos_rects), NMS_OVERLAP_THRESHOLD)
for x0, y0, x1, y1, score in pos_rects:
cv2.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),
(0, 255, 255), 2)
text = '%.2f' % score
cv2.putText(img, text, (int(x0), int(y0) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(test_img_path, img)
cv2.waitKey(0)
| 36.443609 | 78 | 0.621003 | import cv2
import numpy as np
import os
from non_max_suppression import non_max_suppression_fast as nms
if not os.path.isdir('CarData'):
print('CarData folder not found. Please download and unzip '
'http://l2r.cs.uiuc.edu/~cogcomp/Data/Car/CarData.tar.gz '
'or https://github.com/gcr/arc-evaluator/raw/master/CarData.tar.gz '
'into the same folder as this script.')
exit(1)
BOW_NUM_TRAINING_SAMPLES_PER_CLASS = 10
SVM_NUM_TRAINING_SAMPLES_PER_CLASS = 100
SVM_SCORE_THRESHOLD = 1.8
NMS_OVERLAP_THRESHOLD = 0.15
sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = {}
flann = cv2.FlannBasedMatcher(index_params, search_params)
bow_kmeans_trainer = cv2.BOWKMeansTrainer(12)
bow_extractor = cv2.BOWImgDescriptorExtractor(sift, flann)
def get_pos_and_neg_paths(i):
pos_path = 'CarData/TrainImages/pos-%d.pgm' % (i+1)
neg_path = 'CarData/TrainImages/neg-%d.pgm' % (i+1)
return pos_path, neg_path
def add_sample(path):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
keypoints, descriptors = sift.detectAndCompute(img, None)
if descriptors is not None:
bow_kmeans_trainer.add(descriptors)
for i in range(BOW_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
add_sample(pos_path)
add_sample(neg_path)
voc = bow_kmeans_trainer.cluster()
bow_extractor.setVocabulary(voc)
def extract_bow_descriptors(img):
features = sift.detect(img)
return bow_extractor.compute(img, features)
training_data = []
training_labels = []
for i in range(SVM_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
pos_img = cv2.imread(pos_path, cv2.IMREAD_GRAYSCALE)
pos_descriptors = extract_bow_descriptors(pos_img)
if pos_descriptors is not None:
training_data.extend(pos_descriptors)
training_labels.append(1)
neg_img = cv2.imread(neg_path, cv2.IMREAD_GRAYSCALE)
neg_descriptors = extract_bow_descriptors(neg_img)
if neg_descriptors is not None:
training_data.extend(neg_descriptors)
training_labels.append(-1)
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(50)
svm.train(np.array(training_data), cv2.ml.ROW_SAMPLE,
np.array(training_labels))
def pyramid(img, scale_factor=1.25, min_size=(200, 80),
max_size=(600, 600)):
h, w = img.shape
min_w, min_h = min_size
max_w, max_h = max_size
while w >= min_w and h >= min_h:
if w <= max_w and h <= max_h:
yield img
w /= scale_factor
h /= scale_factor
img = cv2.resize(img, (int(w), int(h)),
interpolation=cv2.INTER_AREA)
def sliding_window(img, step=20, window_size=(100, 40)):
img_h, img_w = img.shape
window_w, window_h = window_size
    for y in range(0, img_h, step):
        for x in range(0, img_w, step):
roi = img[y:y+window_h, x:x+window_w]
roi_h, roi_w = roi.shape
if roi_w == window_w and roi_h == window_h:
yield (x, y, roi)
for test_img_path in ['CarData/TestImages/test-0.pgm',
'CarData/TestImages/test-1.pgm',
'../images/car.jpg',
'../images/haying.jpg',
'../images/statue.jpg',
'../images/woodcutters.jpg']:
img = cv2.imread(test_img_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pos_rects = []
for resized in pyramid(gray_img):
for x, y, roi in sliding_window(resized):
descriptors = extract_bow_descriptors(roi)
if descriptors is None:
continue
prediction = svm.predict(descriptors)
if prediction[1][0][0] == 1.0:
raw_prediction = svm.predict(
descriptors, flags=cv2.ml.STAT_MODEL_RAW_OUTPUT)
score = -raw_prediction[1][0][0]
if score > SVM_SCORE_THRESHOLD:
h, w = roi.shape
scale = gray_img.shape[0] / float(resized.shape[0])
pos_rects.append([int(x * scale),
int(y * scale),
int((x+w) * scale),
int((y+h) * scale),
score])
pos_rects = nms(np.array(pos_rects), NMS_OVERLAP_THRESHOLD)
for x0, y0, x1, y1, score in pos_rects:
cv2.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),
(0, 255, 255), 2)
text = '%.2f' % score
cv2.putText(img, text, (int(x0), int(y0) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(test_img_path, img)
cv2.waitKey(0)
| true | true |
1c49a501a055fdf11fd4806f3310e16640bf3419 | 1,209 | py | Python | experiments_dikower/controllers/drlbox/net/q_net.py | prokhn/onti-2019-bigdata | b9296141958f544177388be94072efce7bdc7814 | [
"MIT"
] | null | null | null | experiments_dikower/controllers/drlbox/net/q_net.py | prokhn/onti-2019-bigdata | b9296141958f544177388be94072efce7bdc7814 | [
"MIT"
] | null | null | null | experiments_dikower/controllers/drlbox/net/q_net.py | prokhn/onti-2019-bigdata | b9296141958f544177388be94072efce7bdc7814 | [
"MIT"
] | null | null | null |
import tensorflow as tf
from drlbox.common.namescope import TF_NAMESCOPE
from drlbox.net.net_base import RLNet
class QNet(RLNet):
def set_model(self, model):
self.model = model
self.weights = model.weights
self.ph_state, = model.inputs
self.tf_values, = model.outputs
def set_loss(self):
with tf.name_scope(TF_NAMESCOPE):
ph_action = tf.placeholder(tf.int32, [None])
onehot_act = tf.one_hot(ph_action, depth=self.tf_values.shape[1])
ph_target = tf.placeholder(tf.float32, [None])
value_act = tf.reduce_sum(self.tf_values * onehot_act, axis=1)
# loss
self.tf_loss = tf.losses.huber_loss(ph_target, value_act,
reduction=tf.losses.Reduction.NONE)
# error for prioritization: abs td error
self.tf_error = tf.abs(ph_target - value_act)
# kfac loss list
self.kfac_loss_list = [('normal_predictive', (self.tf_values,))]
# placeholder list
self.ph_train_list = [self.ph_state, ph_action, ph_target]
def action_values(self, state):
return self.sess.run(self.tf_values, feed_dict={self.ph_state: state})
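    # Added note (illustration only): a training step feeds the three
    # placeholders collected in ph_train_list, e.g. (names hypothetical)
    #   feed = dict(zip(net.ph_train_list, [states, actions, targets]))
    #   net.sess.run(train_op, feed_dict=feed)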
| 31.815789 | 78 | 0.643507 |
import tensorflow as tf
from drlbox.common.namescope import TF_NAMESCOPE
from drlbox.net.net_base import RLNet
class QNet(RLNet):
def set_model(self, model):
self.model = model
self.weights = model.weights
self.ph_state, = model.inputs
self.tf_values, = model.outputs
def set_loss(self):
with tf.name_scope(TF_NAMESCOPE):
ph_action = tf.placeholder(tf.int32, [None])
onehot_act = tf.one_hot(ph_action, depth=self.tf_values.shape[1])
ph_target = tf.placeholder(tf.float32, [None])
value_act = tf.reduce_sum(self.tf_values * onehot_act, axis=1)
self.tf_loss = tf.losses.huber_loss(ph_target, value_act,
reduction=tf.losses.Reduction.NONE)
self.tf_error = tf.abs(ph_target - value_act)
self.kfac_loss_list = [('normal_predictive', (self.tf_values,))]
self.ph_train_list = [self.ph_state, ph_action, ph_target]
def action_values(self, state):
return self.sess.run(self.tf_values, feed_dict={self.ph_state: state})
| true | true |
1c49a67e7bb4ec8b16df469d4c6b5c559bb2054a | 493 | py | Python | dostaweemvse/dostaweemvse/models/order.py | ale3otik/DostaweemWse | 0887d47cbe5fba30c3c2b0ecf064d151efd961d0 | [
"MIT"
] | 3 | 2017-12-10T17:41:22.000Z | 2017-12-12T20:27:31.000Z | dostaweemvse/dostaweemvse/models/order.py | ale3otik/DostaweemWse | 0887d47cbe5fba30c3c2b0ecf064d151efd961d0 | [
"MIT"
] | null | null | null | dostaweemvse/dostaweemvse/models/order.py | ale3otik/DostaweemWse | 0887d47cbe5fba30c3c2b0ecf064d151efd961d0 | [
"MIT"
] | null | null | null | from django.db import models
from .route import Route
from .location import Location
class Order(models.Model):
route = models.ForeignKey(Route, on_delete=models.CASCADE)
metadata = models.CharField(max_length=50)
from_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location3')
to_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location4')
max_cost = models.IntegerField()
weight = models.IntegerField()
| 41.083333 | 99 | 0.774848 | from django.db import models
from .route import Route
from .location import Location
class Order(models.Model):
route = models.ForeignKey(Route, on_delete=models.CASCADE)
metadata = models.CharField(max_length=50)
from_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location3')
to_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location4')
max_cost = models.IntegerField()
weight = models.IntegerField()
| true | true |
1c49a68dfda9e081e60939541072bfa57b8b2ac7 | 99 | py | Python | qiniuFolderSync/utils.py | ipconfiger/qiniuFolderSync | 0e1362bb3dda6ca898040f8e019e712d6ed1db6b | [
"MIT"
] | null | null | null | qiniuFolderSync/utils.py | ipconfiger/qiniuFolderSync | 0e1362bb3dda6ca898040f8e019e712d6ed1db6b | [
"MIT"
] | null | null | null | qiniuFolderSync/utils.py | ipconfiger/qiniuFolderSync | 0e1362bb3dda6ca898040f8e019e712d6ed1db6b | [
"MIT"
] | null | null | null | # coding=utf8
import os
def getpath(dir_path, *path):
return os.path.join(dir_path, *path)
| 11 | 40 | 0.686869 |
import os
def getpath(dir_path, *path):
return os.path.join(dir_path, *path)
| true | true |
1c49a6bad5a201d8712133721c743ef53f0ea197 | 2,275 | py | Python | tests/unit/flow/test_flow_before_after.py | afizs/jina | 52c554c2d593e24129e86dfe3c71bf04f1495082 | [
"Apache-2.0"
] | 3 | 2021-07-30T09:47:54.000Z | 2021-07-31T22:29:20.000Z | tests/unit/flow/test_flow_before_after.py | sheetal01761/jina | 520fc0794fb43d96e1fc85534e9df3cf9c89c42e | [
"Apache-2.0"
] | 2 | 2021-07-14T14:07:18.000Z | 2022-02-06T05:00:41.000Z | tests/unit/flow/test_flow_before_after.py | sheetal01761/jina | 520fc0794fb43d96e1fc85534e9df3cf9c89c42e | [
"Apache-2.0"
] | 2 | 2021-10-06T07:28:11.000Z | 2021-11-18T20:20:18.000Z | import pytest
from jina import Executor, requests, __default_executor__
from jina import Flow
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 3
assert f.num_peas == 4
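# Added summary (not in the original tests): the assertions above encode that a
# Flow counts its gateway as one pod (so a single .add() gives num_pods == 2),
# and each uses_before / uses_after executor adds exactly one pea on top of the
# pod's own pea; hence 2, 3, 3 and 4 peas across the four test cases.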
| 26.149425 | 85 | 0.627692 | import pytest
from jina import Executor, requests, __default_executor__
from jina import Flow
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 3
assert f.num_peas == 4
| true | true |
1c49a6c86e3fa57ac97eba6d634a4f64dce2ee44 | 654 | py | Python | runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null | runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null | runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null |
# Write results to this file
OUTFILE = 'runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'max'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repititions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repitition
ITER = 200
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4
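# (added note) SIZE = 4 -> 10**4 bytes = 10 KB, matching the 10KB/ directory in OUTFILE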
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl' | 24.222222 | 70 | 0.701835 |
OUTFILE = 'runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.result.csv'
SOURCE = ['10.0.0.1']
TARGET = ['10.0.0.2']
IDSMODE = 'max'
MODE = 'par'
EPOCHS = 100
ITER = 200
SIZE = 4
PROTOCOL = 'ssl' | true | true |
1c49a6f7de9175a674e3e1ba8b8c5f27a3fcd695 | 290 | py | Python | 1138_05_12-merge.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_12-merge.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_12-merge.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | """Merge multiple shapefiles"""
import glob
import shapefile
files = glob.glob("footprints_*shp")
w = shapefile.Writer()
r = None
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save("Merged")
| 22.307692 | 37 | 0.675862 | import glob
import shapefile
files = glob.glob("footprints_*shp")
w = shapefile.Writer()
r = None
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save("Merged")
| true | true |
1c49a7f9ce091fecb34b660858eb1cfba5796214 | 7,240 | py | Python | pynot/tests.py | adamora/pynot | 47abb7e9db85301a976c012380e3963c64590414 | [
"Apache-2.0"
] | 6 | 2018-09-15T08:05:34.000Z | 2019-01-19T22:51:27.000Z | pynot/tests.py | intelligenia/pynot | 47abb7e9db85301a976c012380e3963c64590414 | [
"Apache-2.0"
] | null | null | null | pynot/tests.py | intelligenia/pynot | 47abb7e9db85301a976c012380e3963c64590414 | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from pynot.models import *
from pynot.factories import *
from rest_assured.testcases import *
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from rest_framework import serializers
class CategoryTestCase(ReadRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'category'
factory_class = CategoryFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
models.PyNot.sync_settings()
super(CategoryTestCase, self).setUp()
class EventTestCase(DetailAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'event'
factory_class = EventFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'description']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventTestCase, self).setUp()
class ParameterTestSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = ('id', 'name')
extra_fields_human_name = {'id':'ID',
'name':'Nombre'}
extra_fields_email = ('name',)
class EventTestSerializer(serializers.ModelSerializer):
parameters = ParameterTestSerializer(many=True)
class Meta:
model = Event
fields = ('id', 'name', 'description', 'parameters')
extra_fields_human_name = {'id':'ID',
'name':'Nombre',
'description':u'Descripción',
'parameters':u'Parámetros'}
extra_fields_group = ('id',)
class CategoryTestSerializer(serializers.ModelSerializer):
events = EventTestSerializer(many=True)
class Meta:
model = Category
fields = ('id', 'name', 'events')
extra_fields_human_name = {'name':'Nombre',
'events':'Eventos'}
extra_fields_email = ('name',)
extra_fields_user = ('id',)
class ParameterTestCase(TestCase):
category = None
event = None
parameter = None
notification = None
def setUp(self):
self.category = CategoryFactory.create(name='cat_name')
self.event = EventFactory.create(category=self.category,
name='event_name',
slug='slug_event')
event = EventFactory.create(category=self.category,
name='event_name2')
self.parameter = ParameterFactory.create(event=self.event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name',
human_name='Categoria')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name2',
human_name='Parametro 2')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name3',
human_name='Parametro 3')
self.notification = EventNotificationFactory.create(event=self.event,
name='Mensaje de alta de usuario',
message='El nombre de la categoria es param_name.name')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='[email protected]',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='param_name.events.parameters.name',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='1',
type='user')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='2',
type='user')
def test_get_serializer_data_body(self):
data_body = self.parameter.data_body
self.assertEqual(data_body["id"]["human_name"], "id")
self.assertTrue('events' not in data_body)
def test_get_serializer_data_emails(self):
data_email = self.parameter.data_email
self.assertTrue("id" not in data_email)
self.assertEqual(data_email["events"]["data"]["parameters"]\
["data"]["name"]["human_name"], "Nombre")
def test_get_serializer_data_users(self):
data_user = self.parameter.data_user
self.assertTrue("name" not in data_user)
self.assertTrue("id" in data_user)
def test_get_serializer_data_groups(self):
data_group = self.parameter.data_group
self.assertTrue("id" not in data_group)
self.assertEqual(data_group["events"]["data"]["id"]["human_name"],
"ID")
def test_fire(self):
self.event.fire(param_name=CategoryTestSerializer(self.category))
self.assertEqual(models.EventNotificationFire.objects.all().count(), 1)
self.assertEqual(models.Notification.objects.all().count(), 6)
self.event.fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 2)
self.notification.collective = True
self.notification.save()
PyNot.event('slug_event').fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 3)
self.assertEqual(models.Notification.objects.all().count(), 17) # 6 + 6 + 5 (collective one)
class EventNotificationTestCase(DetailAPITestCaseMixin,
WriteRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'eventnotification'
factory_class = EventNotificationFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'subject', 'message']
create_data = {'name' : 'Test notification',
'subject' : 'Test notification',
'message' : 'Test notification'}
update_data = {'name': 'Test notification updated',
'subject': 'Test notification updated',
'message': 'Test notification updated',
'recipients': []}
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventNotificationTestCase, self).setUp()
def get_create_data(self):
data = self.create_data
data['event']=self.object.event_id
return data
| 36.565657 | 100 | 0.623895 | from django.test import TestCase
from pynot.models import *
from pynot.factories import *
from rest_assured.testcases import *
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from rest_framework import serializers
class CategoryTestCase(ReadRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'category'
factory_class = CategoryFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
models.PyNot.sync_settings()
super(CategoryTestCase, self).setUp()
class EventTestCase(DetailAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'event'
factory_class = EventFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'description']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventTestCase, self).setUp()
class ParameterTestSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = ('id', 'name')
extra_fields_human_name = {'id':'ID',
'name':'Nombre'}
extra_fields_email = ('name',)
class EventTestSerializer(serializers.ModelSerializer):
parameters = ParameterTestSerializer(many=True)
class Meta:
model = Event
fields = ('id', 'name', 'description', 'parameters')
extra_fields_human_name = {'id':'ID',
'name':'Nombre',
'description':u'Descripción',
'parameters':u'Parámetros'}
extra_fields_group = ('id',)
class CategoryTestSerializer(serializers.ModelSerializer):
events = EventTestSerializer(many=True)
class Meta:
model = Category
fields = ('id', 'name', 'events')
extra_fields_human_name = {'name':'Nombre',
'events':'Eventos'}
extra_fields_email = ('name',)
extra_fields_user = ('id',)
class ParameterTestCase(TestCase):
category = None
event = None
parameter = None
notification = None
def setUp(self):
self.category = CategoryFactory.create(name='cat_name')
self.event = EventFactory.create(category=self.category,
name='event_name',
slug='slug_event')
event = EventFactory.create(category=self.category,
name='event_name2')
self.parameter = ParameterFactory.create(event=self.event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name',
human_name='Categoria')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name2',
human_name='Parametro 2')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name3',
human_name='Parametro 3')
self.notification = EventNotificationFactory.create(event=self.event,
name='Mensaje de alta de usuario',
message='El nombre de la categoria es param_name.name')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='[email protected]',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='param_name.events.parameters.name',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='1',
type='user')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='2',
type='user')
def test_get_serializer_data_body(self):
data_body = self.parameter.data_body
self.assertEqual(data_body["id"]["human_name"], "id")
self.assertTrue('events' not in data_body)
def test_get_serializer_data_emails(self):
data_email = self.parameter.data_email
self.assertTrue("id" not in data_email)
self.assertEqual(data_email["events"]["data"]["parameters"]\
["data"]["name"]["human_name"], "Nombre")
def test_get_serializer_data_users(self):
data_user = self.parameter.data_user
self.assertTrue("name" not in data_user)
self.assertTrue("id" in data_user)
def test_get_serializer_data_groups(self):
data_group = self.parameter.data_group
self.assertTrue("id" not in data_group)
self.assertEqual(data_group["events"]["data"]["id"]["human_name"],
"ID")
def test_fire(self):
self.event.fire(param_name=CategoryTestSerializer(self.category))
self.assertEqual(models.EventNotificationFire.objects.all().count(), 1)
self.assertEqual(models.Notification.objects.all().count(), 6)
self.event.fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 2)
self.notification.collective = True
self.notification.save()
PyNot.event('slug_event').fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 3)
self.assertEqual(models.Notification.objects.all().count(), 17)
class EventNotificationTestCase(DetailAPITestCaseMixin,
WriteRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'eventnotification'
factory_class = EventNotificationFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'subject', 'message']
create_data = {'name' : 'Test notification',
'subject' : 'Test notification',
'message' : 'Test notification'}
update_data = {'name': 'Test notification updated',
'subject': 'Test notification updated',
'message': 'Test notification updated',
'recipients': []}
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="[email protected]", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventNotificationTestCase, self).setUp()
def get_create_data(self):
data = self.create_data
data['event']=self.object.event_id
return data
| true | true |
1c49a7fda9f17643c78c4e525ad84da9839557ec | 7,469 | py | Python | FastRCNN/BrainScript/PARAMETERS.py | jakkaj/CNTK_AMLWorkbench | 27a496c665f2565e15450da8743807f528326d5e | [
"MIT"
] | 6 | 2017-11-16T21:26:38.000Z | 2020-05-04T21:06:10.000Z | FastRCNN/BrainScript/PARAMETERS.py | jakkaj/CNTK_AMLWorkbench | 27a496c665f2565e15450da8743807f528326d5e | [
"MIT"
] | null | null | null | FastRCNN/BrainScript/PARAMETERS.py | jakkaj/CNTK_AMLWorkbench | 27a496c665f2565e15450da8743807f528326d5e | [
"MIT"
] | 2 | 2017-11-22T05:42:27.000Z | 2018-12-31T11:18:26.000Z | from __future__ import print_function
import os
from imdb_data import imdb_data
import fastRCNN, time, datetime
from fastRCNN.pascal_voc import pascal_voc # as nmsPython
print (datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
dataset = "Grocery"
#dataset = "pascalVoc"
#dataset = "pascalVoc_aeroplanesOnly"
#dataset = "CustomDataset"
############################
# default parameters
############################
class Parameters():
def __init__(self, datasetName):
# cntk params
self.datasetName = datasetName
self.cntk_nrRois = 100 # how many ROIs to zero-pad. Use 100 to get quick result. Use 2000 to get good results.
self.cntk_padWidth = 1000
self.cntk_padHeight = 1000
# directories
self.rootDir = os.path.dirname(os.path.abspath(__file__))
self.imgDir = os.path.join(self.rootDir, "..", "..", "..", "DataSets", datasetName)
# derived directories
self.procDir = os.path.join(self.rootDir, "proc", datasetName + "_{}".format(self.cntk_nrRois))
self.resultsDir = os.path.join(self.rootDir, "results", datasetName + "_{}".format(self.cntk_nrRois))
self.roiDir = os.path.join(self.procDir, "rois")
self.cntkFilesDir = os.path.join(self.procDir, "cntkFiles")
self.cntkTemplateDir = self.rootDir
# ROI generation
self.roi_minDimRel = 0.01 # minium relative width/height of a ROI
self.roi_maxDimRel = 1.0 # maximum relative width/height of a ROI
self.roi_minNrPixelsRel = 0 # minium relative area covered by ROI
self.roi_maxNrPixelsRel = 1.0 # maximm relative area covered by ROI
self.roi_maxAspectRatio = 4.0 # maximum aspect Ratio of a ROI vertically and horizontally
self.roi_maxImgDim = 200 # image size used for ROI generation
self.ss_scale = 100 # selective search ROIS: parameter controlling cluster size for segmentation
self.ss_sigma = 1.2 # selective search ROIs: width of gaussian kernal for segmentation
self.ss_minSize = 20 # selective search ROIs: minimum component size for segmentation
self.grid_nrScales = 7 # uniform grid ROIs: number of iterations from largest possible ROI to smaller ROIs
self.grid_aspectRatios = [1.0, 2.0, 0.5] # uniform grid ROIs: aspect ratio of ROIs
# thresholds
self.train_posOverlapThres = 0.5 # threshold for marking ROIs as positive.
self.nmsThreshold = 0.3 # Non-Maxima suppression threshold (in range [0,1]).
# The lower the more ROIs will be combined. Used in 5_evaluateResults and 5_visualizeResults.
self.cntk_num_train_images = -1 # set per data set below
self.cntk_num_test_images = -1 # set per data set below
self.cntk_mb_size = -1 # set per data set below
self.cntk_max_epochs = -1 # set per data set below
self.cntk_momentum_time_constant = -1 # set per data set below
############################
# project-specific parameters
############################
class GroceryParameters(Parameters):
def __init__(self, datasetName):
super(GroceryParameters,self).__init__(datasetName)
self.classes = ('__background__', # always index 0
'avocado', 'orange', 'butter', 'champagne', 'eggBox', 'gerkin', 'joghurt', 'ketchup',
'orangeJuice', 'onion', 'pepper', 'tomato', 'water', 'milk', 'tabasco', 'mustard')
# roi generation
self.roi_minDimRel = 0.04
self.roi_maxDimRel = 0.4
self.roi_minNrPixelsRel = 2 * self.roi_minDimRel * self.roi_minDimRel
self.roi_maxNrPixelsRel = 0.33 * self.roi_maxDimRel * self.roi_maxDimRel
# model training / scoring
self.classifier = 'nn'
self.cntk_num_train_images = 25
self.cntk_num_test_images = 5
self.cntk_mb_size = 5
self.cntk_max_epochs = 20
self.cntk_momentum_time_constant = 10
# postprocessing
self.nmsThreshold = 0.01
# database
self.imdbs = dict() # database provider of images and image annotations
for image_set in ["train", "test"]:
self.imdbs[image_set] = imdb_data(image_set, self.classes, self.cntk_nrRois, self.imgDir, self.roiDir, self.cntkFilesDir, boAddGroundTruthRois=(image_set!='test'))
class CustomDataset(Parameters):
def __init__(self, datasetName):
super(CustomDataset,self).__init__(datasetName)
class PascalParameters(Parameters):
def __init__(self, datasetName):
super(PascalParameters,self).__init__(datasetName)
if datasetName.startswith("pascalVoc_aeroplanesOnly"):
self.classes = ('__background__', 'aeroplane')
self.lutImageSet = {"train": "trainval.aeroplaneOnly", "test": "test.aeroplaneOnly"}
else:
self.classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
self.lutImageSet = {"train": "trainval", "test": "test"}
# use cntk_nrRois = 4000. more than 99% of the test images have less than 4000 rois, but 50% more than 2000
# model training / scoring
self.classifier = 'nn'
self.cntk_num_train_images = 5011
self.cntk_num_test_images = 4952
self.cntk_mb_size = 2
self.cntk_max_epochs = 17
self.cntk_momentum_time_constant = 20
self.pascalDataDir = os.path.join(self.rootDir, "..", "..", "DataSets", "Pascal")
self.imgDir = self.pascalDataDir
# database
self.imdbs = dict()
for image_set, year in zip(["train", "test"], ["2007", "2007"]):
self.imdbs[image_set] = fastRCNN.pascal_voc(self.lutImageSet[image_set], year, self.classes, self.cntk_nrRois, cacheDir=self.cntkFilesDir, devkit_path=self.pascalDataDir)
print ("Number of {} images: {}".format(image_set, self.imdbs[image_set].num_images))
def get_parameters_for_dataset(datasetName=dataset):
if datasetName == "Grocery":
parameters = GroceryParameters(datasetName)
elif datasetName.startswith("pascalVoc"):
parameters = PascalParameters(datasetName)
    elif datasetName == "CustomDataset":
parameters = CustomDataset(datasetName)
else:
        raise ValueError("Unknown dataset name: " + datasetName)
############################
# computed parameters
############################
nrClasses = len(parameters.classes)
parameters.cntk_featureDimensions = {'nn': nrClasses}
parameters.nrClasses = nrClasses
assert parameters.cntk_padWidth == parameters.cntk_padHeight, "ERROR: different width and height for padding currently not supported."
assert parameters.classifier.lower() in ['svm','nn'], "ERROR: only 'nn' or 'svm' classifier supported."
assert not (parameters.datasetName == 'pascalVoc' and parameters.classifier == 'svm'), "ERROR: while technically possibly, writing 2nd-last layer of CNTK model for all pascalVOC images takes too much disk memory."
print ("PARAMETERS: datasetName = " + datasetName)
print ("PARAMETERS: cntk_nrRois = {}".format(parameters.cntk_nrRois))
return parameters
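# Hedged usage sketch (added; not part of the original script): downstream
# pipeline steps would typically just call
#   p = get_parameters_for_dataset()   # defaults to the module-level `dataset`
#   print("classes:", p.nrClasses, "rois:", p.cntk_nrRois)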
| 49.463576 | 217 | 0.641317 | from __future__ import print_function
import os
from imdb_data import imdb_data
import fastRCNN, time, datetime
from fastRCNN.pascal_voc import pascal_voc
print (datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
dataset = "Grocery"
class Parameters():
def __init__(self, datasetName):
self.datasetName = datasetName
        self.cntk_nrRois = 100
        self.cntk_padWidth = 1000
self.cntk_padHeight = 1000
self.rootDir = os.path.dirname(os.path.abspath(__file__))
self.imgDir = os.path.join(self.rootDir, "..", "..", "..", "DataSets", datasetName)
self.procDir = os.path.join(self.rootDir, "proc", datasetName + "_{}".format(self.cntk_nrRois))
self.resultsDir = os.path.join(self.rootDir, "results", datasetName + "_{}".format(self.cntk_nrRois))
self.roiDir = os.path.join(self.procDir, "rois")
self.cntkFilesDir = os.path.join(self.procDir, "cntkFiles")
self.cntkTemplateDir = self.rootDir
        self.roi_minDimRel = 0.01
        self.roi_maxDimRel = 1.0
        self.roi_minNrPixelsRel = 0
        self.roi_maxNrPixelsRel = 1.0
        self.roi_maxAspectRatio = 4.0
        self.roi_maxImgDim = 200
        self.ss_scale = 100
        self.ss_sigma = 1.2
        self.ss_minSize = 20
        self.grid_nrScales = 7
        self.grid_aspectRatios = [1.0, 2.0, 0.5]
        self.train_posOverlapThres = 0.5
        self.nmsThreshold = 0.3
        self.cntk_num_train_images = -1
        self.cntk_num_test_images = -1
        self.cntk_mb_size = -1
        self.cntk_max_epochs = -1
        self.cntk_momentum_time_constant = -1
class GroceryParameters(Parameters):
def __init__(self, datasetName):
super(GroceryParameters,self).__init__(datasetName)
self.classes = ('__background__', 'avocado', 'orange', 'butter', 'champagne', 'eggBox', 'gerkin', 'joghurt', 'ketchup',
'orangeJuice', 'onion', 'pepper', 'tomato', 'water', 'milk', 'tabasco', 'mustard')
self.roi_minDimRel = 0.04
self.roi_maxDimRel = 0.4
self.roi_minNrPixelsRel = 2 * self.roi_minDimRel * self.roi_minDimRel
self.roi_maxNrPixelsRel = 0.33 * self.roi_maxDimRel * self.roi_maxDimRel
self.classifier = 'nn'
self.cntk_num_train_images = 25
self.cntk_num_test_images = 5
self.cntk_mb_size = 5
self.cntk_max_epochs = 20
self.cntk_momentum_time_constant = 10
self.nmsThreshold = 0.01
        self.imdbs = dict()
        for image_set in ["train", "test"]:
self.imdbs[image_set] = imdb_data(image_set, self.classes, self.cntk_nrRois, self.imgDir, self.roiDir, self.cntkFilesDir, boAddGroundTruthRois=(image_set!='test'))
class CustomDataset(Parameters):
def __init__(self, datasetName):
super(CustomDataset,self).__init__(datasetName)
class PascalParameters(Parameters):
def __init__(self, datasetName):
super(PascalParameters,self).__init__(datasetName)
if datasetName.startswith("pascalVoc_aeroplanesOnly"):
self.classes = ('__background__', 'aeroplane')
self.lutImageSet = {"train": "trainval.aeroplaneOnly", "test": "test.aeroplaneOnly"}
else:
self.classes = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
self.lutImageSet = {"train": "trainval", "test": "test"}
self.classifier = 'nn'
self.cntk_num_train_images = 5011
self.cntk_num_test_images = 4952
self.cntk_mb_size = 2
self.cntk_max_epochs = 17
self.cntk_momentum_time_constant = 20
self.pascalDataDir = os.path.join(self.rootDir, "..", "..", "DataSets", "Pascal")
self.imgDir = self.pascalDataDir
self.imdbs = dict()
for image_set, year in zip(["train", "test"], ["2007", "2007"]):
self.imdbs[image_set] = fastRCNN.pascal_voc(self.lutImageSet[image_set], year, self.classes, self.cntk_nrRois, cacheDir=self.cntkFilesDir, devkit_path=self.pascalDataDir)
print ("Number of {} images: {}".format(image_set, self.imdbs[image_set].num_images))
def get_parameters_for_dataset(datasetName=dataset):
if datasetName == "Grocery":
parameters = GroceryParameters(datasetName)
elif datasetName.startswith("pascalVoc"):
parameters = PascalParameters(datasetName)
    elif datasetName == "CustomDataset":
parameters = CustomDataset(datasetName)
else:
        raise ValueError("ERROR: unknown dataset name '{}'".format(datasetName))
nrClasses = len(parameters.classes)
parameters.cntk_featureDimensions = {'nn': nrClasses}
parameters.nrClasses = nrClasses
assert parameters.cntk_padWidth == parameters.cntk_padHeight, "ERROR: different width and height for padding currently not supported."
assert parameters.classifier.lower() in ['svm','nn'], "ERROR: only 'nn' or 'svm' classifier supported."
    assert not (parameters.datasetName == 'pascalVoc' and parameters.classifier == 'svm'), "ERROR: while technically possible, writing 2nd-last layer of CNTK model for all pascalVOC images takes too much disk memory."
print ("PARAMETERS: datasetName = " + datasetName)
print ("PARAMETERS: cntk_nrRois = {}".format(parameters.cntk_nrRois))
return parameters
| true | true |
1c49ab770d5e3e8175eecd2530c65d772f092a4d | 3,811 | py | Python | util/lintlib.py | Xanewok/rust-clippy | 9d1792a4265c3645d716c5bf085c07be8749332a | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1 | 2019-05-14T09:10:46.000Z | 2019-05-14T09:10:46.000Z | util/lintlib.py | Xanewok/rust-clippy | 9d1792a4265c3645d716c5bf085c07be8749332a | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | util/lintlib.py | Xanewok/rust-clippy | 9d1792a4265c3645d716c5bf085c07be8749332a | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | # Common utils for the several housekeeping scripts.
import os
import re
import collections
import logging as log
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
Lint = collections.namedtuple('Lint', 'name level doc sourcefile group')
Config = collections.namedtuple('Config', 'name ty doc default')
lintname_re = re.compile(r'''pub\s+([A-Z_][A-Z_0-9]*)''')
group_re = re.compile(r'''\s*([a-z_][a-z_0-9]+)''')
conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE)
confvar_re = re.compile(
r'''/// Lint: (\w+). (.*).*\n\s*\([^,]+,\s+"([^"]+)",\s+([^=\)]+)=>\s+(.*)\),''', re.MULTILINE)
lint_levels = {
"correctness": 'Deny',
"style": 'Warn',
"complexity": 'Warn',
"perf": 'Warn',
"restriction": 'Allow',
"pedantic": 'Allow',
"nursery": 'Allow',
"cargo": 'Allow',
}
def parse_lints(lints, filepath):
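    # Scan a Rust source file line by line: accumulate /// doc comments, then pick up
    # the lint name, group and level when a declare_clippy_lint!/declare_deprecated_lint! block follows.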
last_comment = []
comment = True
clippy = False
deprecated = False
name = ""
with open(filepath) as fp:
for line in fp:
if comment:
if line.startswith("/// "):
last_comment.append(line[4:])
elif line.startswith("///"):
last_comment.append(line[3:])
elif line.startswith("declare_lint!"):
import sys
print("don't use `declare_lint!` in Clippy, use `declare_clippy_lint!` instead")
sys.exit(42)
elif line.startswith("declare_clippy_lint!"):
comment = False
deprecated = False
clippy = True
name = ""
elif line.startswith("declare_deprecated_lint!"):
comment = False
deprecated = True
clippy = False
else:
last_comment = []
if not comment:
m = lintname_re.search(line)
if m:
name = m.group(1).lower()
line = next(fp)
if deprecated:
level = "Deprecated"
group = "deprecated"
else:
while True:
g = group_re.search(line)
if g:
group = g.group(1).lower()
level = lint_levels.get(group, None)
break
line = next(fp)
if level is None:
continue
log.info("found %s with level %s in %s",
name, level, filepath)
lints.append(Lint(name, level, last_comment, filepath, group))
last_comment = []
comment = True
if "}" in line:
                    log.warning("Warning: missing Lint-Name in %s", filepath)
comment = True
def parse_configs(path):
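    # Extract the configuration options declared inside the define_Conf! macro in utils/conf.rs.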
configs = {}
with open(os.path.join(path, 'utils/conf.rs')) as fp:
contents = fp.read()
match = re.search(conf_re, contents)
confvars = re.findall(confvar_re, match.group(1))
for (lint, doc, name, default, ty) in confvars:
configs[lint.lower()] = Config(name.replace("_", "-"), ty, doc, default)
return configs
def parse_all(path="clippy_lints/src"):
lints = []
for root, dirs, files in os.walk(path):
for fn in files:
if fn.endswith('.rs'):
parse_lints(lints, os.path.join(root, fn))
log.info("got %s lints", len(lints))
configs = parse_configs(path)
log.info("got %d configs", len(configs))
return lints, configs
| 31.758333 | 100 | 0.479402 |
import os
import re
import collections
import logging as log
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
Lint = collections.namedtuple('Lint', 'name level doc sourcefile group')
Config = collections.namedtuple('Config', 'name ty doc default')
lintname_re = re.compile(r'''pub\s+([A-Z_][A-Z_0-9]*)''')
group_re = re.compile(r'''\s*([a-z_][a-z_0-9]+)''')
conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE)
confvar_re = re.compile(
r'''/// Lint: (\w+). (.*).*\n\s*\([^,]+,\s+"([^"]+)",\s+([^=\)]+)=>\s+(.*)\),''', re.MULTILINE)
lint_levels = {
"correctness": 'Deny',
"style": 'Warn',
"complexity": 'Warn',
"perf": 'Warn',
"restriction": 'Allow',
"pedantic": 'Allow',
"nursery": 'Allow',
"cargo": 'Allow',
}
def parse_lints(lints, filepath):
last_comment = []
comment = True
clippy = False
deprecated = False
name = ""
with open(filepath) as fp:
for line in fp:
if comment:
if line.startswith("/// "):
last_comment.append(line[4:])
elif line.startswith("///"):
last_comment.append(line[3:])
elif line.startswith("declare_lint!"):
import sys
print("don't use `declare_lint!` in Clippy, use `declare_clippy_lint!` instead")
sys.exit(42)
elif line.startswith("declare_clippy_lint!"):
comment = False
deprecated = False
clippy = True
name = ""
elif line.startswith("declare_deprecated_lint!"):
comment = False
deprecated = True
clippy = False
else:
last_comment = []
if not comment:
m = lintname_re.search(line)
if m:
name = m.group(1).lower()
line = next(fp)
if deprecated:
level = "Deprecated"
group = "deprecated"
else:
while True:
g = group_re.search(line)
if g:
group = g.group(1).lower()
level = lint_levels.get(group, None)
break
line = next(fp)
if level is None:
continue
log.info("found %s with level %s in %s",
name, level, filepath)
lints.append(Lint(name, level, last_comment, filepath, group))
last_comment = []
comment = True
if "}" in line:
log.warn("Warning: missing Lint-Name in %s", filepath)
comment = True
def parse_configs(path):
configs = {}
with open(os.path.join(path, 'utils/conf.rs')) as fp:
contents = fp.read()
match = re.search(conf_re, contents)
confvars = re.findall(confvar_re, match.group(1))
for (lint, doc, name, default, ty) in confvars:
configs[lint.lower()] = Config(name.replace("_", "-"), ty, doc, default)
return configs
def parse_all(path="clippy_lints/src"):
lints = []
for root, dirs, files in os.walk(path):
for fn in files:
if fn.endswith('.rs'):
parse_lints(lints, os.path.join(root, fn))
log.info("got %s lints", len(lints))
configs = parse_configs(path)
log.info("got %d configs", len(configs))
return lints, configs
| true | true |
1c49abbc287cae05a51c953706e6233eceda83d3 | 682 | py | Python | qiskit/providers/ibmq/exceptions.py | delapuente/qiskit-ibmq-provider | 03322e8df52217ddb91c96f437dbeecebc4564ee | ["Apache-2.0"] | null | null | null | qiskit/providers/ibmq/exceptions.py | delapuente/qiskit-ibmq-provider | 03322e8df52217ddb91c96f437dbeecebc4564ee | ["Apache-2.0"] | null | null | null | qiskit/providers/ibmq/exceptions.py | delapuente/qiskit-ibmq-provider | 03322e8df52217ddb91c96f437dbeecebc4564ee | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Exception for the IBMQ module."""
from qiskit.exceptions import QiskitError
class IBMQError(QiskitError):
"""Base class for errors raised by the IBMQ provider module."""
pass
class IBMQAccountError(IBMQError):
"""Base class for errors raised by account management."""
pass
class IBMQBackendError(IBMQError):
"""IBM Q Backend Errors"""
pass
class IBMQBackendValueError(IBMQError, ValueError):
"""Value errors thrown within IBMQBackend """
pass
| 22 | 77 | 0.714076 |
from qiskit.exceptions import QiskitError
class IBMQError(QiskitError):
pass
class IBMQAccountError(IBMQError):
pass
class IBMQBackendError(IBMQError):
pass
class IBMQBackendValueError(IBMQError, ValueError):
pass
| true | true |
1c49ac44226dae72e7824740af6a64b46bbf9717 | 21,771 | py | Python | tests/test_ft_taxii.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | ["Apache-2.0"] | 147 | 2016-07-22T18:15:49.000Z | 2022-03-26T23:32:44.000Z | tests/test_ft_taxii.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | ["Apache-2.0"] | 167 | 2016-07-27T07:02:25.000Z | 2021-12-16T16:26:52.000Z | tests/test_ft_taxii.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | ["Apache-2.0"] | 112 | 2016-07-22T07:14:29.000Z | 2022-03-24T18:43:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FT TAXII tests
Unit tests for minemeld.ft.taxii
"""
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import gevent
import greenlet
import time
import xmltodict
import os
import libtaxii.constants
import re
import lz4
import json
import minemeld.ft.taxii
import minemeld.ft
FTNAME = 'testft-%d' % int(time.time())
MYDIR = os.path.dirname(__file__)
class MockTaxiiContentBlock(object):
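    # Minimal stand-in for a libtaxii content block wrapping raw STIX 1.1.1 XML.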
def __init__(self, stix_xml):
class _Binding(object):
def __init__(self, id_):
self.binding_id = id_
self.content = stix_xml
self.content_binding = _Binding(libtaxii.constants.CB_STIX_XML_111)
class MineMeldFTTaxiiTests(unittest.TestCase):
@mock.patch.object(gevent, 'Greenlet')
def test_taxiiclient_parse(self, glet_mock):
config = {
'side_config': 'dummy.yml',
'ca_file': 'dummy.crt'
}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.TaxiiClient(FTNAME, chassis, config)
inputs = []
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
testfiles = os.listdir(MYDIR)
testfiles = filter(
lambda x: x.startswith('test_ft_taxii_stix_package_'),
testfiles
)
for t in testfiles:
with open(os.path.join(MYDIR, t), 'r') as f:
sxml = f.read()
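            # the test filename encodes the expected indicator type and indicator count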
mo = re.match('test_ft_taxii_stix_package_([A-Za-z0-9]+)_([0-9]+)_.*', t)
self.assertNotEqual(mo, None)
type_ = mo.group(1)
num_indicators = int(mo.group(2))
stix_objects = {
'observables': {},
'indicators': {},
'ttps': {}
}
content_blocks = [
MockTaxiiContentBlock(sxml)
]
b._handle_content_blocks(
content_blocks,
stix_objects
)
params = {
'ttps': stix_objects['ttps'],
'observables': stix_objects['observables']
}
indicators = [[iid, iv, params] for iid, iv in stix_objects['indicators'].iteritems()]
for i in indicators:
result = b._process_item(i)
self.assertEqual(len(result), num_indicators)
if type_ != 'any':
for r in result:
self.assertEqual(r[1]['type'], type_)
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_init(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertEqual(b.redis_skey_chkp, FTNAME+'.chkp')
self.assertEqual(b.redis_skey_value, FTNAME+'.value')
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_ip(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# CIDR
b.filtered_update(
'a',
indicator='1.1.1.0/24',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/24')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range
b.filtered_update(
'a',
indicator='1.1.1.1-1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range 2
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.31',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# real range
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.33',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators']
cyboxprops = indicator[0]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
cyboxprops = indicator[1]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.32/31')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_domain(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='example.com',
value={
'type': 'domain',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['value'], 'example.com')
self.assertEqual(cyboxprops['type'], 'FQDN')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='www.example.com/admin.php',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], 'www.example.com/admin.php')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_unicode_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_overflow(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
self.fail(msg='hset found')
self.assertEqual(b.statistics['drop.overflow'], 1)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries - 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_hash(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# sha1
b.filtered_update(
'a',
indicator='a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33',
value={
'type': 'sha1',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA1')
SR_mock.reset_mock()
# md5
b.filtered_update(
'a',
indicator='e23fadd6ceef8c618fc1c65191d846fa',
value={
'type': 'md5',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'e23fadd6ceef8c618fc1c65191d846fa')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'MD5')
SR_mock.reset_mock()
# sha256
b.filtered_update(
'a',
indicator='a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9',
value={
'type': 'sha256',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA256')
SR_mock.reset_mock()
b.stop()
| 32.253333 | 138 | 0.558495 |
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import gevent
import greenlet
import time
import xmltodict
import os
import libtaxii.constants
import re
import lz4
import json
import minemeld.ft.taxii
import minemeld.ft
FTNAME = 'testft-%d' % int(time.time())
MYDIR = os.path.dirname(__file__)
class MockTaxiiContentBlock(object):
def __init__(self, stix_xml):
class _Binding(object):
def __init__(self, id_):
self.binding_id = id_
self.content = stix_xml
self.content_binding = _Binding(libtaxii.constants.CB_STIX_XML_111)
class MineMeldFTTaxiiTests(unittest.TestCase):
@mock.patch.object(gevent, 'Greenlet')
def test_taxiiclient_parse(self, glet_mock):
config = {
'side_config': 'dummy.yml',
'ca_file': 'dummy.crt'
}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.TaxiiClient(FTNAME, chassis, config)
inputs = []
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
testfiles = os.listdir(MYDIR)
testfiles = filter(
lambda x: x.startswith('test_ft_taxii_stix_package_'),
testfiles
)
for t in testfiles:
with open(os.path.join(MYDIR, t), 'r') as f:
sxml = f.read()
mo = re.match('test_ft_taxii_stix_package_([A-Za-z0-9]+)_([0-9]+)_.*', t)
self.assertNotEqual(mo, None)
type_ = mo.group(1)
num_indicators = int(mo.group(2))
stix_objects = {
'observables': {},
'indicators': {},
'ttps': {}
}
content_blocks = [
MockTaxiiContentBlock(sxml)
]
b._handle_content_blocks(
content_blocks,
stix_objects
)
params = {
'ttps': stix_objects['ttps'],
'observables': stix_objects['observables']
}
indicators = [[iid, iv, params] for iid, iv in stix_objects['indicators'].iteritems()]
for i in indicators:
result = b._process_item(i)
self.assertEqual(len(result), num_indicators)
if type_ != 'any':
for r in result:
self.assertEqual(r[1]['type'], type_)
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_init(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertEqual(b.redis_skey_chkp, FTNAME+'.chkp')
self.assertEqual(b.redis_skey_value, FTNAME+'.value')
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_ip(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.0/24',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/24')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.1-1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.31',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.33',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators']
cyboxprops = indicator[0]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
cyboxprops = indicator[1]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.32/31')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_domain(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='example.com',
value={
'type': 'domain',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['value'], 'example.com')
self.assertEqual(cyboxprops['type'], 'FQDN')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='www.example.com/admin.php',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], 'www.example.com/admin.php')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_unicode_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_overflow(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
self.fail(msg='hset found')
self.assertEqual(b.statistics['drop.overflow'], 1)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries - 1
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_hash(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33',
value={
'type': 'sha1',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA1')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='e23fadd6ceef8c618fc1c65191d846fa',
value={
'type': 'md5',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'e23fadd6ceef8c618fc1c65191d846fa')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'MD5')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9',
value={
'type': 'sha256',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA256')
SR_mock.reset_mock()
b.stop()
| true | true |
1c49acb962d8f0d4657693061d94007f4a560623 | 1,487 | py | Python | tests/writers/test_boto3_stubs_package.py | greut/mypy_boto3_builder | e3d7fb4bbfbef72f173414bc6f7f9ed992c58333 | ["MIT"] | null | null | null | tests/writers/test_boto3_stubs_package.py | greut/mypy_boto3_builder | e3d7fb4bbfbef72f173414bc6f7f9ed992c58333 | ["MIT"] | null | null | null | tests/writers/test_boto3_stubs_package.py | greut/mypy_boto3_builder | e3d7fb4bbfbef72f173414bc6f7f9ed992c58333 | ["MIT"] | null | null | null | import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from mypy_boto3_builder.writers.boto3_stubs_package import write_boto3_stubs_package
class TestBoto3StubsPackage:
@patch("mypy_boto3_builder.writers.boto3_stubs_package.sort_imports")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.blackify")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.render_jinja2_template")
def test_write_master_package(
self,
render_jinja2_template_mock: MagicMock,
blackify_mock: MagicMock,
sort_imports_mock: MagicMock,
) -> None:
package_mock = MagicMock()
package_mock.name = "package"
package_mock.service_name.module_name = "module"
blackify_mock.return_value = "blackify"
sort_imports_mock.return_value = "sort_imports"
render_jinja2_template_mock.return_value = "render_jinja2_template_mock"
with tempfile.TemporaryDirectory() as output_dir:
output_path = Path(output_dir)
result = write_boto3_stubs_package(package_mock, output_path, True)
assert len(result) == 29
assert result[0].name == "setup.py"
render_jinja2_template_mock.assert_called_with(
Path("boto3-stubs/boto3-stubs/version.py.jinja2"),
package=package_mock,
)
assert len(blackify_mock.mock_calls) == 6
assert len(sort_imports_mock.mock_calls) == 6
| 40.189189 | 84 | 0.705447 | import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from mypy_boto3_builder.writers.boto3_stubs_package import write_boto3_stubs_package
class TestBoto3StubsPackage:
@patch("mypy_boto3_builder.writers.boto3_stubs_package.sort_imports")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.blackify")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.render_jinja2_template")
def test_write_master_package(
self,
render_jinja2_template_mock: MagicMock,
blackify_mock: MagicMock,
sort_imports_mock: MagicMock,
) -> None:
package_mock = MagicMock()
package_mock.name = "package"
package_mock.service_name.module_name = "module"
blackify_mock.return_value = "blackify"
sort_imports_mock.return_value = "sort_imports"
render_jinja2_template_mock.return_value = "render_jinja2_template_mock"
with tempfile.TemporaryDirectory() as output_dir:
output_path = Path(output_dir)
result = write_boto3_stubs_package(package_mock, output_path, True)
assert len(result) == 29
assert result[0].name == "setup.py"
render_jinja2_template_mock.assert_called_with(
Path("boto3-stubs/boto3-stubs/version.py.jinja2"),
package=package_mock,
)
assert len(blackify_mock.mock_calls) == 6
assert len(sort_imports_mock.mock_calls) == 6
| true | true |
1c49acdbc39a4f246a5459a9ce974252a307a5f5 | 3,326 | py | Python | spec2nii/nifti_orientation.py | NeutralKaon/spec2nii | 52f0dc42ad176fdbb173ac051803372909e9971c | ["BSD-3-Clause"] | 5 | 2020-06-24T08:25:51.000Z | 2021-06-30T16:49:37.000Z | spec2nii/nifti_orientation.py | NeutralKaon/spec2nii | 52f0dc42ad176fdbb173ac051803372909e9971c | ["BSD-3-Clause"] | 15 | 2021-11-15T14:57:24.000Z | 2022-03-25T10:07:47.000Z | spec2nii/nifti_orientation.py | NeutralKaon/spec2nii | 52f0dc42ad176fdbb173ac051803372909e9971c | ["BSD-3-Clause"] | 4 | 2020-06-30T16:16:31.000Z | 2021-08-05T19:13:11.000Z | import numpy as np
from scipy.spatial.transform import Rotation
class NIFTIOrient:
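    # Wraps a 4x4 affine and caches its NIfTI quaternion representation
    # (qb, qc, qd plus offsets qx/qy/qz, spacings dx/dy/dz and the qfac sign).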
def __init__(self, affine):
self.Q44 = affine
qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac = nifti_mat44_to_quatern(affine)
self.qb = qb
self.qc = qc
self.qd = qd
self.qx = qx
self.qy = qy
self.qz = qz
self.dx = dx
self.dy = dy
self.dz = dz
self.qfac = qfac
def calc_affine(angles, dimensions, shift):
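    # Compose a rotation (Euler angles in degrees) with voxel scaling,
    # then append the translation column to form a 4x4 affine.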
scalingMat = np.diag(dimensions)
rot = Rotation.from_euler('xyz', angles, degrees=True)
m33 = rot.as_matrix() @ scalingMat
m44 = np.zeros((4, 4))
m44[0:3, 0:3] = m33
m44[3, 3] = 1.0
m44[0:3, 3] = shift
return m44
def nifti_mat44_to_quatern(R):
"""4x4 affine to quaternion representation."""
# offset outputs are read out of input matrix
qx = R[0, 3]
qy = R[1, 3]
qz = R[2, 3]
# load 3x3 matrix into local variables
r11 = R[0, 0]
r12 = R[0, 1]
r13 = R[0, 2]
r21 = R[1, 0]
r22 = R[1, 1]
r23 = R[1, 2]
r31 = R[2, 0]
r32 = R[2, 1]
r33 = R[2, 2]
# compute lengths of each column; these determine grid spacings
xd = np.sqrt(r11 * r11 + r21 * r21 + r31 * r31)
yd = np.sqrt(r12 * r12 + r22 * r22 + r32 * r32)
zd = np.sqrt(r13 * r13 + r23 * r23 + r33 * r33)
# if a column length is zero, patch the trouble
if xd == 0.0:
r11 = 1.0
r21 = 0.0
r31 = 0.0
xd = 1.0
if yd == 0.0:
r22 = 1.0
r12 = 0.0
r32 = 0.0
yd = 1.0
if zd == 0.0:
r33 = 1.0
r13 = 0.0
r23 = 0.0
zd = 1.0
# assign the output lengths
dx = xd
dy = yd
dz = zd
# normalize the columns
r11 /= xd
r21 /= xd
r31 /= xd
r12 /= yd
r22 /= yd
r32 /= yd
r13 /= zd
r23 /= zd
r33 /= zd
zd = r11 * r22 * r33\
- r11 * r32 * r23\
- r21 * r12 * r33\
+ r21 * r32 * r13\
+ r31 * r12 * r23\
- r31 * r22 * r13
# zd should be -1 or 1
if zd > 0: # proper
qfac = 1.0
else: # improper ==> flip 3rd column
qfac = -1.0
r13 *= -1.0
r23 *= -1.0
r33 *= -1.0
# now, compute quaternion parameters
a = r11 + r22 + r33 + 1.0
if a > 0.5: # simplest case
a = 0.5 * np.sqrt(a)
b = 0.25 * (r32 - r23) / a
c = 0.25 * (r13 - r31) / a
d = 0.25 * (r21 - r12) / a
else: # trickier case
xd = 1.0 + r11 - (r22 + r33) # 4*b*b
yd = 1.0 + r22 - (r11 + r33) # 4*c*c
zd = 1.0 + r33 - (r11 + r22) # 4*d*d
if xd > 1.0:
b = 0.5 * np.sqrt(xd)
c = 0.25 * (r12 + r21) / b
d = 0.25 * (r13 + r31) / b
a = 0.25 * (r32 - r23) / b
elif yd > 1.0:
c = 0.5 * np.sqrt(yd)
b = 0.25 * (r12 + r21) / c
d = 0.25 * (r23 + r32) / c
a = 0.25 * (r13 - r31) / c
else:
d = 0.5 * np.sqrt(zd)
b = 0.25 * (r13 + r31) / d
c = 0.25 * (r23 + r32) / d
a = 0.25 * (r21 - r12) / d
if a < 0.0:
b = -b
c = -c
d = -d
qb = b
qc = c
qd = d
return qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac
| 23.422535 | 81 | 0.435057 | import numpy as np
from scipy.spatial.transform import Rotation
class NIFTIOrient:
def __init__(self, affine):
self.Q44 = affine
qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac = nifti_mat44_to_quatern(affine)
self.qb = qb
self.qc = qc
self.qd = qd
self.qx = qx
self.qy = qy
self.qz = qz
self.dx = dx
self.dy = dy
self.dz = dz
self.qfac = qfac
def calc_affine(angles, dimensions, shift):
scalingMat = np.diag(dimensions)
rot = Rotation.from_euler('xyz', angles, degrees=True)
m33 = rot.as_matrix() @ scalingMat
m44 = np.zeros((4, 4))
m44[0:3, 0:3] = m33
m44[3, 3] = 1.0
m44[0:3, 3] = shift
return m44
def nifti_mat44_to_quatern(R):
qx = R[0, 3]
qy = R[1, 3]
qz = R[2, 3]
r11 = R[0, 0]
r12 = R[0, 1]
r13 = R[0, 2]
r21 = R[1, 0]
r22 = R[1, 1]
r23 = R[1, 2]
r31 = R[2, 0]
r32 = R[2, 1]
r33 = R[2, 2]
xd = np.sqrt(r11 * r11 + r21 * r21 + r31 * r31)
yd = np.sqrt(r12 * r12 + r22 * r22 + r32 * r32)
zd = np.sqrt(r13 * r13 + r23 * r23 + r33 * r33)
if xd == 0.0:
r11 = 1.0
r21 = 0.0
r31 = 0.0
xd = 1.0
if yd == 0.0:
r22 = 1.0
r12 = 0.0
r32 = 0.0
yd = 1.0
if zd == 0.0:
r33 = 1.0
r13 = 0.0
r23 = 0.0
zd = 1.0
dx = xd
dy = yd
dz = zd
r11 /= xd
r21 /= xd
r31 /= xd
r12 /= yd
r22 /= yd
r32 /= yd
r13 /= zd
r23 /= zd
r33 /= zd
zd = r11 * r22 * r33\
- r11 * r32 * r23\
- r21 * r12 * r33\
+ r21 * r32 * r13\
+ r31 * r12 * r23\
- r31 * r22 * r13
    if zd > 0:
        qfac = 1.0
    else:
        qfac = -1.0
r13 *= -1.0
r23 *= -1.0
r33 *= -1.0
a = r11 + r22 + r33 + 1.0
    if a > 0.5:
        a = 0.5 * np.sqrt(a)
b = 0.25 * (r32 - r23) / a
c = 0.25 * (r13 - r31) / a
d = 0.25 * (r21 - r12) / a
    else:
        xd = 1.0 + r11 - (r22 + r33)
        yd = 1.0 + r22 - (r11 + r33)
        zd = 1.0 + r33 - (r11 + r22)
        if xd > 1.0:
b = 0.5 * np.sqrt(xd)
c = 0.25 * (r12 + r21) / b
d = 0.25 * (r13 + r31) / b
a = 0.25 * (r32 - r23) / b
elif yd > 1.0:
c = 0.5 * np.sqrt(yd)
b = 0.25 * (r12 + r21) / c
d = 0.25 * (r23 + r32) / c
a = 0.25 * (r13 - r31) / c
else:
d = 0.5 * np.sqrt(zd)
b = 0.25 * (r13 + r31) / d
c = 0.25 * (r23 + r32) / d
a = 0.25 * (r21 - r12) / d
if a < 0.0:
b = -b
c = -c
d = -d
qb = b
qc = c
qd = d
return qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac
| true | true |
1c49ad6c5eb75f92292108b9fb7833bf2d72a793 | 4,623 | py | Python | tensornetwork/backends/backend_test.py | DavidBraun777/TensorNetwork | 55942a12a859a8c6f8be473e623dbf0ddfd790b5 | ["Apache-2.0"] | null | null | null | tensornetwork/backends/backend_test.py | DavidBraun777/TensorNetwork | 55942a12a859a8c6f8be473e623dbf0ddfd790b5 | ["Apache-2.0"] | null | null | null | tensornetwork/backends/backend_test.py | DavidBraun777/TensorNetwork | 55942a12a859a8c6f8be473e623dbf0ddfd790b5 | ["Apache-2.0"] | null | null | null | """Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
#never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
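  # intercept __import__ so the optional backends (torch, tensorflow, jax) appear uninstalled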
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
#not sure why config is imported here?
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
import tensornetwork
net = tensornetwork.TensorNetwork(backend="numpy")
a = net.add_node(np.ones((10,)))
b = net.add_node(np.ones((10,)))
edge = net.connect(a[0], b[0])
final_node = net.contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
import tensornetwork
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="jax")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="tensorflow")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="pytorch")
| 32.787234 | 77 | 0.776119 | import builtins
import sys
import pytest
import numpy as np
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
clean_tensornetwork_modules()
  yield
  clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
import tensornetwork.config
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
import tensornetwork
import tensornetwork.backends.pytorch.pytorch_backend
import tensornetwork.backends.tensorflow.tensorflow_backend
import tensornetwork.backends.jax.jax_backend
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
import tensornetwork
net = tensornetwork.TensorNetwork(backend="numpy")
a = net.add_node(np.ones((10,)))
b = net.add_node(np.ones((10,)))
edge = net.connect(a[0], b[0])
final_node = net.contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
import tensornetwork
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="jax")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="tensorflow")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="pytorch")
| true | true |
1c49ae8cef2d2a65f702a89e8a58574f58e7c5fc | 11,824 | py | Python | tests/test.py | tubaman/pyfacebook | 6fac843c6c52ed916482c9995b4eaa89631aab3a | ["FSFAP"] | 1 | 2020-05-19T05:38:35.000Z | 2020-05-19T05:38:35.000Z | tests/test.py | douglaswth/pyfacebook | 4a1427808ba41e33698c0e018d2ed44e7993b1c9 | ["FSFAP"] | null | null | null | tests/test.py | douglaswth/pyfacebook | 4a1427808ba41e33698c0e018d2ed44e7993b1c9 | ["FSFAP"] | null | null | null | import unittest
import sys
import os
import facebook
import urllib2
try:
from hashlib import md5
md5_constructor = md5
except ImportError:
import md5
md5_constructor = md5.new
try:
import simplejson
except ImportError:
from django.utils import simplejson
import httplib
from minimock import Mock
my_api_key = "e1e9cfeb5e0d7a52e4fbd5d09e1b873e"
my_secret_key = "1bebae7283f5b79aaf9b851addd55b90"
#'{"error_code":100,\
#"error_msg":"Invalid parameter",\
#"request_args":[{"key":"format","value":"JSON"},\
#{"key":"auth_token","value":"24626e24bb12919f2f142145070542e8"},\
#{"key":"sig","value":"36af2af3b93da784149301e77cb1621a"},\
#{"key":"v","value":"1.0"},\
#{"key":"api_key","value":"e1e9cfeb5e0d7a52e4fbd5d09e1b873e"},\
#{"key":"method","value":"facebook.auth.getSession"}]}'
response_str = '{"stuff":"abcd"}'
class MyUrlOpen:
def __init__(self,*args,**kwargs):
pass
def read(self):
global response_str
return response_str
class pyfacebook_UnitTests(unittest.TestCase):
def setUp(self):
facebook.urllib2.urlopen = Mock('urllib2.urlopen')
facebook.urllib2.urlopen.mock_returns_func = MyUrlOpen
pass
def tearDown(self):
pass
def login(self):
pass
def test1(self):
f = facebook.Facebook(api_key=my_api_key, secret_key=my_secret_key)
f.login = self.login
self.assertEquals(f.api_key,my_api_key)
self.assertEquals(f.secret_key,my_secret_key)
self.assertEquals(f.auth_token,None)
self.assertEquals(f.app_name,None)
self.assertEquals(f.callback_path,None)
self.assertEquals(f.internal,None)
def test2(self):
args = {"arg1":"a","arg2":"b","arg3":"c"}
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("acdnj")
f = facebook.Facebook(api_key="abcdf", secret_key="acdnj")
f.login = self.login
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("klmn")
        # note: the trunk code has a bug here; it calls hash.updated instead of hash.update
digest = f._hash_args(args,secret="klmn")
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
f.secret = "klmn"
hasher.update(f.secret)
        # note: the trunk code has a bug here; it calls hash.updated instead of hash.update
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
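    def _reference_signature(self, args, secret):
        # Added for illustration only (this helper is not part of pyfacebook's
        # API): test2 above exercises a signature scheme whose digest is md5
        # over the sorted "key=value" pairs with the secret appended.
        payload = ''.join(['%s=%s' % (k, args[k]) for k in sorted(args.keys())])
        return md5_constructor(payload + secret).hexdigest()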
def test3(self):
global response_str
response = {'stuff':'abcd'}
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token['stuff']),"abcd")
fb.login()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
self.assertEquals(str(res["expires"]),response["expires"])
self.assertEquals(str(res["secret"]),response["secret"])
self.assertEquals(str(res["session_key"]),response["session_key"])
self.assertEquals(str(res["uid"]),response["uid"])
def test4(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_login_url(next="nowhere", popup=True, canvas=True)
self.assertEquals(url,
'http://www.facebook.com/login.php?canvas=1&popup=1&auth_token=abcdef&next=nowhere&v=1.0&api_key=%s'%(my_api_key,))
def test5(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
req = Request({'fb_sig_in_canvas':1},{},'POST')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':1},{},'POST')
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'joe,mary',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'joe,mary',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'page_id':'id'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_page_id':'id'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
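    # Note on test5 (a reading of the fixtures above, not of pyfacebook
    # internals): each passing request carries the digest in 'fb_sig' plus the
    # matching 'fb_sig_*' parameters, so check_session() can strip the prefix,
    # re-hash the remaining values with the secret and compare signatures.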
def test6(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
# self.failUnlessRaises(RuntimeError,fb._add_session_args)
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
fb.auth.getSession()
args = fb._add_session_args()
def test7(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_authorize_url(next="next",next_cancel="next_cancel")
self.assertEquals(url,
'http://www.facebook.com/authorize.php?api_key=%s&next_cancel=next_cancel&v=1.0&next=next' % (my_api_key,))
def test8(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
global response_str
response = {"session_key":"abcdef","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
req = Request({},{'installed':1,'fb_page_id':'id','auth_token':'abcdef'},'GET')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertTrue(res)
def test9(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_add_url(next="next")
self.assertEquals(url,
'http://www.facebook.com/install.php?api_key=%s&v=1.0&next=next' % (my_api_key,))
def send(self,xml):
self.xml = xml
def test10(self):
import Image
image1 = Image.new("RGB", (400, 300), (255, 255, 255))
filename = "image_file.jpg"
image1.save(filename)
global response_str
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
facebook.httplib.HTTP = Mock('httplib.HTTP')
http_connection = Mock('http_connection')
facebook.httplib.HTTP.mock_returns = http_connection
http_connection.send.mock_returns_func = self.send
def _http_passes():
return [200,]
http_connection.getreply.mock_returns_func = _http_passes
def read():
response = {"stuff":"stuff"}
response_str = simplejson.dumps(response)
return response_str
http_connection.file.read.mock_returns_func = read
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
result = fb.photos.upload(image=filename,aid="aid",caption="a caption")
self.assertEquals(str(result["stuff"]),"stuff")
os.remove(filename)
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(pyfacebook_UnitTests))
# Execute the test suite
print("Testing Proxy class\n")
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
| 43.470588 | 142 | 0.505751 | import unittest
import sys
import os
import facebook
import urllib2
try:
from hashlib import md5
md5_constructor = md5
except ImportError:
import md5
md5_constructor = md5.new
try:
import simplejson
except ImportError:
from django.utils import simplejson
import httplib
from minimock import Mock
my_api_key = "e1e9cfeb5e0d7a52e4fbd5d09e1b873e"
my_secret_key = "1bebae7283f5b79aaf9b851addd55b90"
#"error_msg":"Invalid parameter",\
#"request_args":[{"key":"format","value":"JSON"},\
#{"key":"auth_token","value":"24626e24bb12919f2f142145070542e8"},\
#{"key":"sig","value":"36af2af3b93da784149301e77cb1621a"},\
#{"key":"v","value":"1.0"},\
#{"key":"api_key","value":"e1e9cfeb5e0d7a52e4fbd5d09e1b873e"},\
#{"key":"method","value":"facebook.auth.getSession"}]}'
response_str = '{"stuff":"abcd"}'
class MyUrlOpen:
def __init__(self,*args,**kwargs):
pass
def read(self):
global response_str
return response_str
class pyfacebook_UnitTests(unittest.TestCase):
def setUp(self):
facebook.urllib2.urlopen = Mock('urllib2.urlopen')
facebook.urllib2.urlopen.mock_returns_func = MyUrlOpen
pass
def tearDown(self):
pass
def login(self):
pass
def test1(self):
f = facebook.Facebook(api_key=my_api_key, secret_key=my_secret_key)
f.login = self.login
self.assertEquals(f.api_key,my_api_key)
self.assertEquals(f.secret_key,my_secret_key)
self.assertEquals(f.auth_token,None)
self.assertEquals(f.app_name,None)
self.assertEquals(f.callback_path,None)
self.assertEquals(f.internal,None)
def test2(self):
args = {"arg1":"a","arg2":"b","arg3":"c"}
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("acdnj")
f = facebook.Facebook(api_key="abcdf", secret_key="acdnj")
f.login = self.login
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("klmn")
digest = f._hash_args(args,secret="klmn")
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
f.secret = "klmn"
hasher.update(f.secret)
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
def test3(self):
global response_str
response = {'stuff':'abcd'}
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token['stuff']),"abcd")
fb.login()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
self.assertEquals(str(res["expires"]),response["expires"])
self.assertEquals(str(res["secret"]),response["secret"])
self.assertEquals(str(res["session_key"]),response["session_key"])
self.assertEquals(str(res["uid"]),response["uid"])
def test4(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_login_url(next="nowhere", popup=True, canvas=True)
self.assertEquals(url,
'http://www.facebook.com/login.php?canvas=1&popup=1&auth_token=abcdef&next=nowhere&v=1.0&api_key=%s'%(my_api_key,))
def test5(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
req = Request({'fb_sig_in_canvas':1},{},'POST')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':1},{},'POST')
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'joe,mary',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'joe,mary',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'page_id':'id'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_page_id':'id'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
def test6(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
fb.auth.getSession()
args = fb._add_session_args()
def test7(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_authorize_url(next="next",next_cancel="next_cancel")
self.assertEquals(url,
'http://www.facebook.com/authorize.php?api_key=%s&next_cancel=next_cancel&v=1.0&next=next' % (my_api_key,))
def test8(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
global response_str
response = {"session_key":"abcdef","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
req = Request({},{'installed':1,'fb_page_id':'id','auth_token':'abcdef'},'GET')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertTrue(res)
def test9(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_add_url(next="next")
self.assertEquals(url,
'http://www.facebook.com/install.php?api_key=%s&v=1.0&next=next' % (my_api_key,))
def send(self,xml):
self.xml = xml
def test10(self):
import Image
image1 = Image.new("RGB", (400, 300), (255, 255, 255))
filename = "image_file.jpg"
image1.save(filename)
global response_str
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
facebook.httplib.HTTP = Mock('httplib.HTTP')
http_connection = Mock('http_connection')
facebook.httplib.HTTP.mock_returns = http_connection
http_connection.send.mock_returns_func = self.send
def _http_passes():
return [200,]
http_connection.getreply.mock_returns_func = _http_passes
def read():
response = {"stuff":"stuff"}
response_str = simplejson.dumps(response)
return response_str
http_connection.file.read.mock_returns_func = read
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
result = fb.photos.upload(image=filename,aid="aid",caption="a caption")
self.assertEquals(str(result["stuff"]),"stuff")
os.remove(filename)
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(pyfacebook_UnitTests))
print("Testing Proxy class\n")
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
| true | true |
1c49aea1b2fec5e7a7e723bbfc78bae2a63ad735 | 3,912 | py | Python | atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DecisionTreeResultVO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'entity_id': 'list[str]',
'entity_type': 'str'
}
attribute_map = {
'entity_id': 'entity_id',
'entity_type': 'entity_type'
}
def __init__(self, entity_id=None, entity_type=None): # noqa: E501
"""DecisionTreeResultVO - a model defined in Swagger""" # noqa: E501
self._entity_id = None
self._entity_type = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if entity_type is not None:
self.entity_type = entity_type
@property
def entity_id(self):
"""Gets the entity_id of this DecisionTreeResultVO. # noqa: E501
:return: The entity_id of this DecisionTreeResultVO. # noqa: E501
:rtype: list[str]
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""Sets the entity_id of this DecisionTreeResultVO.
:param entity_id: The entity_id of this DecisionTreeResultVO. # noqa: E501
:type: list[str]
"""
self._entity_id = entity_id
@property
def entity_type(self):
"""Gets the entity_type of this DecisionTreeResultVO. # noqa: E501
:return: The entity_type of this DecisionTreeResultVO. # noqa: E501
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""Sets the entity_type of this DecisionTreeResultVO.
:param entity_type: The entity_type of this DecisionTreeResultVO. # noqa: E501
:type: str
"""
self._entity_type = entity_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DecisionTreeResultVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DecisionTreeResultVO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
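# Minimal usage sketch (illustrative values only, not from the API spec):
#
#     vo = DecisionTreeResultVO(entity_id=['node_1'], entity_type='CLIENT')
#     vo.to_dict()  # -> {'entity_id': ['node_1'], 'entity_type': 'CLIENT'}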
| 27.549296 | 87 | 0.57771 |
import pprint
import re
import six
class DecisionTreeResultVO(object):
swagger_types = {
'entity_id': 'list[str]',
'entity_type': 'str'
}
attribute_map = {
'entity_id': 'entity_id',
'entity_type': 'entity_type'
}
def __init__(self, entity_id=None, entity_type=None):
self._entity_id = None
self._entity_type = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if entity_type is not None:
self.entity_type = entity_type
@property
def entity_id(self):
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
self._entity_id = entity_id
@property
def entity_type(self):
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
self._entity_type = entity_type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DecisionTreeResultVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, DecisionTreeResultVO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c49af06fb182620ad77a0022f91480b1c789ab3 | 909 | py | Python | posts/migrations/0001_initial.py | CodeEnvironment/django-rest-framework-deploy-heroku | c6ffb20961c193b0f4dc1289de904b5d6750f335 | [
"MIT"
] | 3 | 2021-04-05T14:02:44.000Z | 2022-01-25T07:50:20.000Z | posts/migrations/0001_initial.py | CodeEnvironment/django-rest-framework-deploy-aws | d9cf1d016e22b9b5697c769bd094776d25a3f90b | [
"MIT"
] | null | null | null | posts/migrations/0001_initial.py | CodeEnvironment/django-rest-framework-deploy-aws | d9cf1d016e22b9b5697c769bd094776d25a3f90b | [
"MIT"
] | 1 | 2022-01-23T15:09:59.000Z | 2022-01-23T15:09:59.000Z | # Generated by Django 2.2.5 on 2020-08-16 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_title', models.CharField(max_length=200)),
('post_body', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='PostsRates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('likes', models.BigIntegerField(default=0)),
('dislikes', models.BigIntegerField(default=0)),
],
),
]
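# For reference, a models.py that would produce this initial migration
# (a sketch reconstructed from the operations above, not the project's
# actual file):
#
#     from django.db import models
#
#     class Posts(models.Model):
#         post_title = models.CharField(max_length=200)
#         post_body = models.TextField(max_length=1000)
#
#     class PostsRates(models.Model):
#         likes = models.BigIntegerField(default=0)
#         dislikes = models.BigIntegerField(default=0)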
| 29.322581 | 114 | 0.561056 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_title', models.CharField(max_length=200)),
('post_body', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='PostsRates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('likes', models.BigIntegerField(default=0)),
('dislikes', models.BigIntegerField(default=0)),
],
),
]
| true | true |
1c49af27f673eeb7de969da70743dbbc5e30b487 | 154 | py | Python | Python/euclidian.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | 1 | 2019-10-28T20:12:23.000Z | 2019-10-28T20:12:23.000Z | Python/euclidian.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | null | null | null | Python/euclidian.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | 1 | 2020-10-16T14:10:12.000Z | 2020-10-16T14:10:12.000Z | from math import sqrt
# define the Euclidean distance between two 2-D points
def euclidienne(x,y):
return sqrt((x[0]-y[0])**2+(x[1]-y[1])**2)
print(euclidienne((1,3),(2,2)))
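# A small generalization to n dimensions (an addition for illustration, not
# part of the original exercise); for 2-D points it matches euclidienne():
def euclidienne_nd(x, y):
    return sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))

print(euclidienne_nd((1, 3), (2, 2)))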
| 17.111111 | 46 | 0.62987 | from math import sqrt
def euclidienne(x,y):
return sqrt((x[0]-y[0])**2+(x[1]-y[1])**2)
print(euclidienne((1,3),(2,2)))
| true | true |
1c49afe5fad9b3edfdb62cbe3f1abd9199670eec | 15,586 | py | Python | pscript/parser3.py | JesusZerpa/pscript | 5ac86c1b5983b47f3f0554e0801893d284f84b2d | [
"BSD-2-Clause"
] | 190 | 2018-02-26T00:19:37.000Z | 2022-03-29T13:35:33.000Z | pscript/parser3.py | JesusZerpa/pscript | 5ac86c1b5983b47f3f0554e0801893d284f84b2d | [
"BSD-2-Clause"
] | 53 | 2018-03-21T22:39:46.000Z | 2022-01-15T05:22:05.000Z | pscript/parser3.py | JesusZerpa/pscript | 5ac86c1b5983b47f3f0554e0801893d284f84b2d | [
"BSD-2-Clause"
] | 21 | 2018-04-16T21:13:00.000Z | 2022-02-27T23:28:14.000Z | """
Python Builtins
---------------
Most builtin functions (that make sense in JS) are automatically
translated to JavaScript: isinstance, issubclass, callable, hasattr,
getattr, setattr, delattr, print, len, max, min, chr, ord, dict, list,
tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
Furthermore, all methods for list, dict and str are implemented (except str
methods: encode, decode, format_map, isprintable, maketrans).
.. pscript_example::
# "self" is replaced with "this"
self.foo
# Printing just works
print('some test')
print(a, b, c, sep='-')
# Getting the length of a string or array
len(foo)
# Rounding and abs
round(foo) # round to nearest integer
int(foo) # round towards 0 as in Python
abs(foo)
# min and max
min(foo)
min(a, b, c)
max(foo)
max(a, b, c)
# divmod
a, b = divmod(100, 7) # -> 14, 2
# Aggregation
sum(foo)
all(foo)
any(foo)
# Turning things into numbers, bools and strings
str(s)
float(x)
bool(y)
int(z) # this rounds towards zero like in Python
chr(65) # -> 'A'
ord('A') # -> 65
# Turning things into lists and dicts
dict([['foo', 1], ['bar', 2]]) # -> {'foo': 1, 'bar': 2}
list('abc') # -> ['a', 'b', 'c']
dict(other_dict) # make a copy
list(other_list) # make copy
The isinstance function (and friends)
-------------------------------------
The ``isinstance()`` function works for all JS primitive types, but also
for user-defined classes.
.. pscript_example::
# Basic types
isinstance(3, float) # in JS there are no ints
isinstance('', str)
isinstance([], list)
isinstance({}, dict)
isinstance(foo, types.FunctionType)
# Can also use JS strings
isinstance(3, 'number')
isinstance('', 'string')
isinstance([], 'array')
isinstance({}, 'object')
isinstance(foo, 'function')
# You can use it on your own types too ...
isinstance(x, MyClass)
isinstance(x, 'MyClass') # equivalent
isinstance(x, 'Object') # also yields true (subclass of Object)
# issubclass works too
issubclass(Foo, Bar)
# As well as callable
callable(foo)
hasattr, getattr, setattr and delattr
-------------------------------------
.. pscript_example::
a = {'foo': 1, 'bar': 2}
hasattr(a, 'foo') # -> True
hasattr(a, 'fooo') # -> False
hasattr(null, 'foo') # -> False
getattr(a, 'foo') # -> 1
getattr(a, 'fooo') # -> raise AttributeError
getattr(a, 'fooo', 3) # -> 3
getattr(null, 'foo', 3) # -> 3
setattr(a, 'foo', 2)
delattr(a, 'foo')
Creating sequences
------------------
.. pscript_example::
range(10)
range(2, 10, 2)
range(100, 0, -1)
reversed(foo)
sorted(foo)
enumerate(foo)
zip(foo, bar)
filter(func, foo)
map(func, foo)
List methods
------------
.. pscript_example::
# Call a.append() if it exists, otherwise a.push()
a.append(x)
# Similar for remove()
a.remove(x)
Dict methods
------------
.. pscript_example::
a = {'foo': 3}
a['foo']
a.get('foo', 0)
a.get('foo')
a.keys()
Str methods
-----------
.. pscript_example::
"foobar".startswith('foo')
"foobar".replace('foo', 'bar')
"foobar".upper()
Using JS specific functionality
-------------------------------
When writing PScript inside Python modules, we recommend that where
specific JavaScript functionality is used, the references are
prefixed with ``window.``, where ``window`` represents the global JS
namespace. All global JavaScript objects, functions, and variables
automatically become members of the ``window`` object. This helps
make it clear that the functionality is specific to JS, and also
helps static code analysis tools like flake8.
.. pscript_example::
from pscript import window # this is a stub
def foo(a):
return window.Math.cos(a)
Aside from ``window``, ``pscript`` also provides ``undefined``,
``Infinity``, and ``NaN``.
"""
from . import commonast as ast
from . import stdlib
from .parser2 import Parser2, JSError, unify # noqa
from .stubs import RawJS
# This class has several `function_foo()` and `method_bar()` methods
# to implement corresponding functionality. Most of these are
# auto-generated from the stdlib. However, some methods need explicit
# implementation, e.g. to parse keyword arguments, or are inlined rather
# than implemented via the stdlib.
#
# Note that when the number of arguments does not match, almost all
# functions raise a compile-time error. The methods, however, will
# bypass the stdlib in this case, because it is assumed that the user
# intended to call a special method on the object.
class Parser3(Parser2):
""" Parser to transcompile Python to JS, allowing more Pythonic
code, like ``self``, ``print()``, ``len()``, list methods, etc.
"""
def function_this_is_js(self, node):
# Note that we handle this_is_js() shortcuts in the if-statement
# directly. This replacement with a string is when this_is_js()
# is used outside an if statement.
if len(node.arg_nodes) != 0:
raise JSError('this_is_js() expects zero arguments.')
return ('"this_is_js()"')
def function_RawJS(self, node):
if len(node.arg_nodes) == 1:
if not isinstance(node.arg_nodes[0], ast.Str):
raise JSError('RawJS needs a verbatim string (use multiple '
'args to bypass PScript\'s RawJS).')
lines = RawJS._str2lines(node.arg_nodes[0].value.strip())
nl = '\n' + (self._indent * 4) * ' '
return nl.join(lines)
else:
return None # maybe RawJS is a thing
## Python builtin functions
def function_isinstance(self, node):
if len(node.arg_nodes) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.arg_nodes[0]))
cls = unify(self.parse(node.arg_nodes[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp == 'array':
return ['Array.isArray(', ob, ')']
elif cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
return ["Object.prototype.toString.call(", ob ,
").slice(8,-1).toLowerCase() === '%s'" % cmp.lower()]
# In http://stackoverflow.com/questions/11108877 the following is
# proposed, which might be better in theory, but is > 50% slower
return ["({}).toString.call(",
ob,
r").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
"'%s'" % cmp.lower()
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
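    # For illustration, the JS the branches above emit (approximate):
    #   isinstance(x, list)    -> Array.isArray(x)
    #   isinstance(x, str)     -> Object.prototype.toString.call(x)
    #                               .slice(8,-1).toLowerCase() === 'string'
    #   isinstance(x, MyClass) -> x instanceof MyClass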
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.arg_nodes) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.arg_nodes[0]))
cls2 = unify(self.parse(node.arg_nodes[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.kwarg_nodes:
if kw.name == 'sep':
sep = ''.join(self.parse(kw.value_node))
elif kw.name == 'end':
end = ''.join(self.parse(kw.value_node))
elif kw.name in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.name)
# Combine args
args = [unify(self.parse(arg)) for arg in node.arg_nodes]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args) or '""'
return 'console.log(' + args_concat + end + ')'
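    # For example (approximate): print(a, b, sep='-') compiles to
    #   console.log(a + '-' + b)
    # and a bare print() compiles to console.log("").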
def function_len(self, node):
if len(node.arg_nodes) == 1:
return unify(self.parse(node.arg_nodes[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.arg_nodes) == 0:
raise JSError('max() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.arg_nodes) == 0:
raise JSError('min() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.arg_nodes) == 1:
arg = unify(self.parse(node.arg_nodes[0]))
return '(typeof %s === "function")' % arg
else:
raise JSError('callable() needs at least one argument')
def function_chr(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'String.fromCharCode(%s)' % arg
else:
raise JSError('chr() needs at least one argument')
def function_ord(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return '%s.charCodeAt(0)' % arg
else:
raise JSError('ord() needs at least one argument')
def function_dict(self, node):
if len(node.arg_nodes) == 0:
kwargs = ['%s:%s' % (arg.name, unify(self.parse(arg.value_node)))
for arg in node.kwarg_nodes]
return '{%s}' % ', '.join(kwargs)
if len(node.arg_nodes) == 1:
return self.use_std_function('dict', node.arg_nodes)
else:
raise JSError('dict() needs at least one argument')
def function_list(self, node):
if len(node.arg_nodes) == 0:
return '[]'
if len(node.arg_nodes) == 1:
return self.use_std_function('list', node.arg_nodes)
else:
raise JSError('list() needs at least one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
if len(node.arg_nodes) == 1:
args = ast.Num(0), node.arg_nodes[0], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 2:
args = node.arg_nodes[0], node.arg_nodes[1], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 3:
return self.use_std_function('range', node.arg_nodes)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
def function_sorted(self, node):
if len(node.arg_nodes) == 1:
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sorted: %r' % kw.name)
return self.use_std_function('sorted', [node.arg_nodes[0], key, reverse])
else:
raise JSError('sorted() needs one argument')
## Methods of list/dict/str
def method_sort(self, node, base):
if len(node.arg_nodes) == 0: # sorts args are keyword-only
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
def method_format(self, node, base):
if node.kwarg_nodes:
raise JSError('Method format() does not support keyword args.')
return self.use_std_method(base, 'format', node.arg_nodes)
# Add functions and methods to the class, using the stdlib functions ...
def make_function(name, nargs, function_deps, method_deps):
def function_X(self, node):
if node.kwarg_nodes:
raise JSError('Function %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
raise JSError('Function %s needs #args in %r.' % (name, nargs))
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_function(name, node.arg_nodes)
return function_X
def make_method(name, nargs, function_deps, method_deps):
def method_X(self, node, base):
if node.kwarg_nodes:
raise JSError('Method %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
return None # call as-is, don't use our variant
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_method(base, name, node.arg_nodes)
return method_X
for name, code in stdlib.METHODS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'method_' + name):
m = make_method(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'method_' + name, m)
for name, code in stdlib.FUNCTIONS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'function_' + name):
m = make_function(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'function_' + name, m)
| 33.663067 | 86 | 0.568074 |
from . import commonast as ast
from . import stdlib
from .parser2 import Parser2, JSError, unify from .stubs import RawJS
class Parser3(Parser2):
def function_this_is_js(self, node):
if len(node.arg_nodes) != 0:
raise JSError('this_is_js() expects zero arguments.')
return ('"this_is_js()"')
def function_RawJS(self, node):
if len(node.arg_nodes) == 1:
if not isinstance(node.arg_nodes[0], ast.Str):
raise JSError('RawJS needs a verbatim string (use multiple '
'args to bypass PScript\'s RawJS).')
lines = RawJS._str2lines(node.arg_nodes[0].value.strip())
nl = '\n' + (self._indent * 4) * ' '
return nl.join(lines)
else:
return None # maybe RawJS is a thing
## Python builtin functions
def function_isinstance(self, node):
if len(node.arg_nodes) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.arg_nodes[0]))
cls = unify(self.parse(node.arg_nodes[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp == 'array':
return ['Array.isArray(', ob, ')']
elif cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
return ["Object.prototype.toString.call(", ob ,
").slice(8,-1).toLowerCase() === '%s'" % cmp.lower()]
# In http://stackoverflow.com/questions/11108877 the following is
# proposed, which might be better in theory, but is > 50% slower
return ["({}).toString.call(",
ob,
r").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
"'%s'" % cmp.lower()
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.arg_nodes) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.arg_nodes[0]))
cls2 = unify(self.parse(node.arg_nodes[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.kwarg_nodes:
if kw.name == 'sep':
sep = ''.join(self.parse(kw.value_node))
elif kw.name == 'end':
end = ''.join(self.parse(kw.value_node))
elif kw.name in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.name)
# Combine args
args = [unify(self.parse(arg)) for arg in node.arg_nodes]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args) or '""'
return 'console.log(' + args_concat + end + ')'
def function_len(self, node):
if len(node.arg_nodes) == 1:
return unify(self.parse(node.arg_nodes[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.arg_nodes) == 0:
raise JSError('max() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.arg_nodes) == 0:
raise JSError('min() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.arg_nodes) == 1:
arg = unify(self.parse(node.arg_nodes[0]))
return '(typeof %s === "function")' % arg
else:
raise JSError('callable() needs at least one argument')
def function_chr(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'String.fromCharCode(%s)' % arg
else:
raise JSError('chr() needs at least one argument')
def function_ord(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return '%s.charCodeAt(0)' % arg
else:
raise JSError('ord() needs at least one argument')
def function_dict(self, node):
if len(node.arg_nodes) == 0:
kwargs = ['%s:%s' % (arg.name, unify(self.parse(arg.value_node)))
for arg in node.kwarg_nodes]
return '{%s}' % ', '.join(kwargs)
if len(node.arg_nodes) == 1:
return self.use_std_function('dict', node.arg_nodes)
else:
raise JSError('dict() needs at least one argument')
def function_list(self, node):
if len(node.arg_nodes) == 0:
return '[]'
if len(node.arg_nodes) == 1:
return self.use_std_function('list', node.arg_nodes)
else:
raise JSError('list() needs at least one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
if len(node.arg_nodes) == 1:
args = ast.Num(0), node.arg_nodes[0], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 2:
args = node.arg_nodes[0], node.arg_nodes[1], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 3:
return self.use_std_function('range', node.arg_nodes)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
def function_sorted(self, node):
if len(node.arg_nodes) == 1:
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sorted: %r' % kw.name)
return self.use_std_function('sorted', [node.arg_nodes[0], key, reverse])
else:
raise JSError('sorted() needs one argument')
## Methods of list/dict/str
def method_sort(self, node, base):
if len(node.arg_nodes) == 0: # sorts args are keyword-only
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
def method_format(self, node, base):
if node.kwarg_nodes:
raise JSError('Method format() does not support keyword args.')
return self.use_std_method(base, 'format', node.arg_nodes)
# Add functions and methods to the class, using the stdib functions ...
def make_function(name, nargs, function_deps, method_deps):
def function_X(self, node):
if node.kwarg_nodes:
raise JSError('Function %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
raise JSError('Function %s needs #args in %r.' % (name, nargs))
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_function(name, node.arg_nodes)
return function_X
def make_method(name, nargs, function_deps, method_deps):
def method_X(self, node, base):
if node.kwarg_nodes:
raise JSError('Method %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
return None # call as-is, don't use our variant
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_method(base, name, node.arg_nodes)
return method_X
for name, code in stdlib.METHODS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'method_' + name):
m = make_method(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'method_' + name, m)
for name, code in stdlib.FUNCTIONS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'function_' + name):
m = make_function(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'function_' + name, m)
| true | true |
1c49b1235f8371ebf0a2bfea75889cf5b89b0310 | 977 | py | Python | fhir_post_from_directory.py | NimbusInformatics/bdcat-fhir-azure-prototype | c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca | [
"Apache-2.0"
] | null | null | null | fhir_post_from_directory.py | NimbusInformatics/bdcat-fhir-azure-prototype | c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca | [
"Apache-2.0"
] | null | null | null | fhir_post_from_directory.py | NimbusInformatics/bdcat-fhir-azure-prototype | c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca | [
"Apache-2.0"
] | 1 | 2020-10-17T20:19:57.000Z | 2020-10-17T20:19:57.000Z | # Given a FHIR server, an auth token, and a directory, this script finds
# the .json files in the directory and posts them to the FHIR server
import requests
import sys
import json
import urllib3
from pathlib import Path
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
fhir_server_uri = sys.argv[1]
token = sys.argv[2]
directory_path = sys.argv[3]
headers = {'Authorization' : "Bearer " + token, 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
# List all files in directory using pathlib
basepath = Path(directory_path)
files_in_basepath = (entry for entry in basepath.iterdir() if entry.is_file())
for item in files_in_basepath:
if (item.name.endswith('.json')):
print(directory_path + '/' + item.name)
with open(directory_path + '/' + item.name) as json_file:
json_data = json.load(json_file)
r = requests.post(fhir_server_uri, data=json.dumps(json_data), headers=headers, verify=False)
print(r.json())
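# Example invocation (endpoint, token and path are placeholders, not values
# from this repository):
#   python fhir_post_from_directory.py https://example.org/fhir/Patient "$TOKEN" ./bundles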
| 31.516129 | 115 | 0.741044 |
import requests
import sys
import json
import urllib3
from pathlib import Path
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
fhir_server_uri = sys.argv[1]
token = sys.argv[2]
directory_path = sys.argv[3]
headers = {'Authorization' : "Bearer " + token, 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
basepath = Path(directory_path)
files_in_basepath = (entry for entry in basepath.iterdir() if entry.is_file())
for item in files_in_basepath:
if (item.name.endswith('.json')):
print(directory_path + '/' + item.name)
with open(directory_path + '/' + item.name) as json_file:
json_data = json.load(json_file)
r = requests.post(fhir_server_uri, data=json.dumps(json_data), headers=headers, verify=False)
print(r.json())
| true | true |
1c49b15084ebbefe896922b679e5df502b62eff9 | 889 | py | Python | tests/bugs/core_1489_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1489_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1489_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_1489
# title:        DATEADD works incorrectly with NULL arguments
# description:
# tracker_id: CORE-1489
# min_versions: []
# versions: 2.1.0
# qmid: bugs.core_1489
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """SELECT 1, DATEADD(SECOND, Null, CAST('01.01.2007' AS DATE)) FROM RDB$DATABASE;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CONSTANT DATEADD
============ ===========
1 <null>
"""
@pytest.mark.version('>=2.1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 21.682927 | 97 | 0.656918 |
import pytest
from firebird.qa import db_factory, isql_act, Action
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """SELECT 1, DATEADD(SECOND, Null, CAST('01.01.2007' AS DATE)) FROM RDB$DATABASE;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CONSTANT DATEADD
============ ===========
1 <null>
"""
@pytest.mark.version('>=2.1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| true | true |
1c49b49b5655f4bd51f7a622c855b147b4dbaa5f | 6,632 | py | Python | phypartspiecharts/phypartspiecharts.py | joelnitta/phyloscripts | ef308fc45e8aae904bf8e235ec0a9809b588a6ea | [
"MIT"
] | null | null | null | phypartspiecharts/phypartspiecharts.py | joelnitta/phyloscripts | ef308fc45e8aae904bf8e235ec0a9809b588a6ea | [
"MIT"
] | null | null | null | phypartspiecharts/phypartspiecharts.py | joelnitta/phyloscripts | ef308fc45e8aae904bf8e235ec0a9809b588a6ea | [
"MIT"
] | 2 | 2020-06-08T18:11:32.000Z | 2021-04-05T13:43:14.000Z | #!/usr/bin/env python
helptext= '''
Generate the "Pie Chart" representation of gene tree conflict from Smith et al. 2015 from
the output of phyparts, the bipartition summary software described in the same paper.
The input files include three files produced by PhyParts, and a file containing a species
tree in Newick format (likely, the tree used for PhyParts). The output is an SVG containing
the phylogeny along with pie charts at each node.
Requirements:
Python 2.7
ete3
matplotlib
'''
import matplotlib,sys,argparse
from ete3 import Tree, TreeStyle, TextFace,NodeStyle,faces, COLOR_SCHEMES
#Read in species tree and convert to ultrametric
#Match phyparts nodes to ete3 nodes
def get_phyparts_nodes(sptree_fn,phyparts_root):
sptree = Tree(sptree_fn)
sptree.convert_to_ultrametric()
phyparts_node_key = [line for line in open(phyparts_root+".node.key")]
subtrees_dict = {n.split()[0]:Tree(n.split()[1]+";") for n in phyparts_node_key}
subtrees_topids = {}
for x in subtrees_dict:
subtrees_topids[x] = subtrees_dict[x].get_topology_id()
#print(subtrees_topids['1'])
#print()
for node in sptree.traverse():
node_topid = node.get_topology_id()
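        # note: the next block is leftover debug output for one hard-coded
        # tip name; it only fires when that taxon is present in the tree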
if "Takakia_4343a" in node.get_leaf_names():
print(node_topid)
print(node)
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
node.name = subtree
return sptree,subtrees_dict,subtrees_topids
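# The matching above keys nodes by ete3's get_topology_id(), which (as this
# script assumes) is invariant to child order, so for example
# Tree("((A,B),C);") and Tree("((B,A),C);") share the same topology id.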
#Summarize concordance and conflict from Phyparts
def get_concord_and_conflict(phyparts_root,subtrees_dict,subtrees_topids):
with open(phyparts_root + ".concon.tre") as phyparts_trees:
concon_tree = Tree(phyparts_trees.readline())
conflict_tree = Tree(phyparts_trees.readline())
concord_dict = {}
conflict_dict = {}
for node in concon_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
concord_dict[subtree] = node.support
for node in conflict_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
conflict_dict[subtree] = node.support
return concord_dict, conflict_dict
#Generate Pie Chart data
def get_pie_chart_data(phyparts_root,total_genes,concord_dict,conflict_dict):
phyparts_hist = [line for line in open(phyparts_root + ".hist")]
phyparts_pies = {}
phyparts_dict = {}
for n in phyparts_hist:
n = n.split(",")
tot_genes = float(n.pop(-1))
node_name = n.pop(0)[4:]
concord = float(n.pop(0))
concord = concord_dict[node_name]
all_conflict = conflict_dict[node_name]
if len(n) > 0:
most_conflict = max([float(x) for x in n])
else:
most_conflict = 0.0
adj_concord = (concord/total_genes) * 100
adj_most_conflict = (most_conflict/total_genes) * 100
other_conflict = (all_conflict - most_conflict) / total_genes * 100
the_rest = (total_genes - concord - all_conflict) / total_genes * 100
pie_list = [adj_concord,adj_most_conflict,other_conflict,the_rest]
phyparts_pies[node_name] = pie_list
phyparts_dict[node_name] = [int(round(concord,0)),int(round(tot_genes-concord,0))]
return phyparts_dict, phyparts_pies
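# Worked example with made-up numbers: for 100 total genes with 60 concordant,
# 25 in the single largest conflicting bipartition and 5 in other conflicts,
# pie_list is [60.0, 25.0, 5.0, 10.0], i.e. concordant / top conflict / other
# conflict / uninformative, as percentages summing to 100.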
def node_text_layout(mynode):
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="branch-right")
parser = argparse.ArgumentParser(description=helptext,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('species_tree',help="Newick formatted species tree topology.")
parser.add_argument('phyparts_root',help="File root name used for Phyparts.")
parser.add_argument('num_genes',type=int,default=0,help="Number of total gene trees. Used to properly scale pie charts.")
parser.add_argument('--taxon_subst',help="Comma-delimted file to translate tip names.")
parser.add_argument("--svg_name",help="File name for SVG generated by script",default="pies.svg")
parser.add_argument("--show_nodes",help="Also show tree with nodes labeled same as PhyParts",action="store_true",default=False)
parser.add_argument("--colors",help="Four colors of the pie chart: concordance (blue) top conflict (green), other conflict (red), no signal (gray)",nargs="+",default=["blue","green","red","dark gray"])
parser.add_argument("--no_ladderize",help="Do not ladderize the input species tree.",action="store_true",default=False)
args = parser.parse_args()
if args.no_ladderize:
ladderize=False
else:
ladderize=True
plot_tree,subtrees_dict,subtrees_topids = get_phyparts_nodes(args.species_tree, args.phyparts_root)
#print(subtrees_dict)
concord_dict, conflict_dict = get_concord_and_conflict(args.phyparts_root,subtrees_dict,subtrees_topids)
phyparts_dist, phyparts_pies = get_pie_chart_data(args.phyparts_root,args.num_genes,concord_dict,conflict_dict)
if args.taxon_subst:
taxon_subst = {line.split(",")[0]:line.split(",")[1] for line in open(args.taxon_subst,'U')}
for leaf in plot_tree.get_leaves():
try:
leaf.name = taxon_subst[leaf.name]
except KeyError:
print(leaf.name)
continue
def phyparts_pie_layout(mynode):
if mynode.name in phyparts_pies:
pie= faces.PieChartFace(phyparts_pies[mynode.name],
#colors=COLOR_SCHEMES["set1"],
colors = args.colors,
width=50, height=50)
pie.border.width = None
pie.opacity = 1
faces.add_face_to_node(pie,mynode, 0, position="branch-right")
concord_text = faces.TextFace(str(int(concord_dict[mynode.name]))+' ',fsize=20)
conflict_text = faces.TextFace(str(int(conflict_dict[mynode.name]))+' ',fsize=20)
faces.add_face_to_node(concord_text,mynode,0,position = "branch-top")
faces.add_face_to_node(conflict_text,mynode,0,position="branch-bottom")
else:
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="aligned")
#Plot Pie Chart
ts = TreeStyle()
ts.show_leaf_name = False
ts.layout_fn = phyparts_pie_layout
nstyle = NodeStyle()
nstyle["size"] = 0
for n in plot_tree.traverse():
n.set_style(nstyle)
n.img_style["vt_line_width"] = 0
ts.draw_guiding_lines = True
ts.guiding_lines_color = "black"
ts.guiding_lines_type = 0
ts.scale = 30
ts.branch_vertical_margin = 10
plot_tree.convert_to_ultrametric()
if ladderize:
plot_tree.ladderize(direction=1)
my_svg = plot_tree.render(args.svg_name,tree_style=ts,w=595,dpi=300)
if args.show_nodes:
node_style = TreeStyle()
node_style.show_leaf_name=False
node_style.layout_fn = node_text_layout
plot_tree.render("tree_nodes.pdf",tree_style=node_style)
| 35.465241 | 202 | 0.740199 |
helptext= '''
Generate the "Pie Chart" representation of gene tree conflict from Smith et al. 2015 from
the output of phyparts, the bipartition summary software described in the same paper.
The input files include three files produced by PhyParts, and a file containing a species
tree in Newick format (likely, the tree used for PhyParts). The output is an SVG containing
the phylogeny along with pie charts at each node.
Requirements:
Python 2.7
ete3
matplotlib
'''
import matplotlib,sys,argparse
from ete3 import Tree, TreeStyle, TextFace,NodeStyle,faces, COLOR_SCHEMES
def get_phyparts_nodes(sptree_fn,phyparts_root):
sptree = Tree(sptree_fn)
sptree.convert_to_ultrametric()
phyparts_node_key = [line for line in open(phyparts_root+".node.key")]
subtrees_dict = {n.split()[0]:Tree(n.split()[1]+";") for n in phyparts_node_key}
subtrees_topids = {}
for x in subtrees_dict:
subtrees_topids[x] = subtrees_dict[x].get_topology_id()
for node in sptree.traverse():
node_topid = node.get_topology_id()
if "Takakia_4343a" in node.get_leaf_names():
print(node_topid)
print(node)
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
node.name = subtree
return sptree,subtrees_dict,subtrees_topids
def get_concord_and_conflict(phyparts_root,subtrees_dict,subtrees_topids):
with open(phyparts_root + ".concon.tre") as phyparts_trees:
concon_tree = Tree(phyparts_trees.readline())
conflict_tree = Tree(phyparts_trees.readline())
concord_dict = {}
conflict_dict = {}
for node in concon_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
concord_dict[subtree] = node.support
for node in conflict_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
conflict_dict[subtree] = node.support
return concord_dict, conflict_dict
def get_pie_chart_data(phyparts_root,total_genes,concord_dict,conflict_dict):
phyparts_hist = [line for line in open(phyparts_root + ".hist")]
phyparts_pies = {}
phyparts_dict = {}
for n in phyparts_hist:
n = n.split(",")
tot_genes = float(n.pop(-1))
node_name = n.pop(0)[4:]
concord = float(n.pop(0))
concord = concord_dict[node_name]
all_conflict = conflict_dict[node_name]
if len(n) > 0:
most_conflict = max([float(x) for x in n])
else:
most_conflict = 0.0
adj_concord = (concord/total_genes) * 100
adj_most_conflict = (most_conflict/total_genes) * 100
other_conflict = (all_conflict - most_conflict) / total_genes * 100
the_rest = (total_genes - concord - all_conflict) / total_genes * 100
pie_list = [adj_concord,adj_most_conflict,other_conflict,the_rest]
phyparts_pies[node_name] = pie_list
phyparts_dict[node_name] = [int(round(concord,0)),int(round(tot_genes-concord,0))]
return phyparts_dict, phyparts_pies
def node_text_layout(mynode):
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="branch-right")
parser = argparse.ArgumentParser(description=helptext,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('species_tree',help="Newick formatted species tree topology.")
parser.add_argument('phyparts_root',help="File root name used for Phyparts.")
parser.add_argument('num_genes',type=int,default=0,help="Number of total gene trees. Used to properly scale pie charts.")
parser.add_argument('--taxon_subst',help="Comma-delimited file to translate tip names.")
parser.add_argument("--svg_name",help="File name for SVG generated by script",default="pies.svg")
parser.add_argument("--show_nodes",help="Also show tree with nodes labeled same as PhyParts",action="store_true",default=False)
parser.add_argument("--colors",help="Four colors of the pie chart: concordance (blue) top conflict (green), other conflict (red), no signal (gray)",nargs="+",default=["blue","green","red","dark gray"])
parser.add_argument("--no_ladderize",help="Do not ladderize the input species tree.",action="store_true",default=False)
args = parser.parse_args()
if args.no_ladderize:
ladderize=False
else:
ladderize=True
plot_tree,subtrees_dict,subtrees_topids = get_phyparts_nodes(args.species_tree, args.phyparts_root)
concord_dict, conflict_dict = get_concord_and_conflict(args.phyparts_root,subtrees_dict,subtrees_topids)
phyparts_dist, phyparts_pies = get_pie_chart_data(args.phyparts_root,args.num_genes,concord_dict,conflict_dict)
if args.taxon_subst:
taxon_subst = {line.split(",")[0]:line.split(",")[1].strip() for line in open(args.taxon_subst,'U')}
for leaf in plot_tree.get_leaves():
try:
leaf.name = taxon_subst[leaf.name]
except KeyError:
print(leaf.name)
continue
def phyparts_pie_layout(mynode):
if mynode.name in phyparts_pies:
pie= faces.PieChartFace(phyparts_pies[mynode.name],
colors = args.colors,
width=50, height=50)
pie.border.width = None
pie.opacity = 1
faces.add_face_to_node(pie,mynode, 0, position="branch-right")
concord_text = faces.TextFace(str(int(concord_dict[mynode.name]))+' ',fsize=20)
conflict_text = faces.TextFace(str(int(conflict_dict[mynode.name]))+' ',fsize=20)
faces.add_face_to_node(concord_text,mynode,0,position = "branch-top")
faces.add_face_to_node(conflict_text,mynode,0,position="branch-bottom")
else:
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="aligned")
ts = TreeStyle()
ts.show_leaf_name = False
ts.layout_fn = phyparts_pie_layout
nstyle = NodeStyle()
nstyle["size"] = 0
for n in plot_tree.traverse():
n.set_style(nstyle)
n.img_style["vt_line_width"] = 0
ts.draw_guiding_lines = True
ts.guiding_lines_color = "black"
ts.guiding_lines_type = 0
ts.scale = 30
ts.branch_vertical_margin = 10
plot_tree.convert_to_ultrametric()
if ladderize:
plot_tree.ladderize(direction=1)
my_svg = plot_tree.render(args.svg_name,tree_style=ts,w=595,dpi=300)
if args.show_nodes:
node_style = TreeStyle()
node_style.show_leaf_name=False
node_style.layout_fn = node_text_layout
plot_tree.render("tree_nodes.pdf",tree_style=node_style)
| true | true |
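A minimal sketch of the pie-slice arithmetic that get_pie_chart_data performs for one node, on invented counts (the numbers below are assumptions for illustration, not real PhyParts output):

# Toy illustration of the four pie fractions; values are made up.
total_genes = 200.0
concord = 120.0        # gene trees concordant with this bipartition
most_conflict = 30.0   # the single most common conflicting bipartition
all_conflict = 50.0    # all conflicting gene trees at this node
adj_concord = concord / total_genes * 100                              # 60.0 -> blue slice
adj_most_conflict = most_conflict / total_genes * 100                  # 15.0 -> green slice
other_conflict = (all_conflict - most_conflict) / total_genes * 100    # 10.0 -> red slice
the_rest = (total_genes - concord - all_conflict) / total_genes * 100  # 15.0 -> gray slice
print([adj_concord, adj_most_conflict, other_conflict, the_rest])      # sums to 100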
1c49b4d84b1d5b5b155c9774146d459ed14b8043 | 408 | py | Python | examples/use_cases/case4_show_commands/send_command.py | johnbarneta/netmiko | 331187987526f0f784bdf28c85c5256c480d955e | [
"MIT"
] | 2 | 2019-07-23T02:27:19.000Z | 2019-07-23T02:27:25.000Z | examples/use_cases/case4_show_commands/send_command.py | johnbarneta/netmiko | 331187987526f0f784bdf28c85c5256c480d955e | [
"MIT"
] | 4 | 2020-03-21T22:58:35.000Z | 2020-03-25T12:11:26.000Z | examples/use_cases/case4_show_commands/send_command.py | johnbarneta/netmiko | 331187987526f0f784bdf28c85c5256c480d955e | [
"MIT"
] | 1 | 2019-10-16T19:02:32.000Z | 2019-10-16T19:02:32.000Z | #!/usr/bin/env python
from netmiko import Netmiko
from getpass import getpass
cisco1 = {
"host": "cisco1.twb-tech.com",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
}
net_connect = Netmiko(**cisco1)
command = "show ip int brief"
print()
print(net_connect.find_prompt())
output = net_connect.send_command(command)
net_connect.disconnect()
print(output)
print()
| 19.428571 | 42 | 0.708333 | from netmiko import Netmiko
from getpass import getpass
cisco1 = {
"host": "cisco1.twb-tech.com",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
}
net_connect = Netmiko(**cisco1)
command = "show ip int brief"
print()
print(net_connect.find_prompt())
output = net_connect.send_command(command)
net_connect.disconnect()
print(output)
print()
| true | true |
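The same lookup can also be written with netmiko's ConnectHandler used as a context manager, which disconnects automatically; this is a hedged variant of the script above, not part of the original file:

from netmiko import ConnectHandler
from getpass import getpass

cisco1 = {
    "host": "cisco1.twb-tech.com",
    "username": "pyclass",
    "password": getpass(),
    "device_type": "cisco_ios",
}

# The context manager calls disconnect() even if send_command raises.
with ConnectHandler(**cisco1) as net_connect:
    print(net_connect.find_prompt())
    print(net_connect.send_command("show ip int brief"))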
1c49b4ed9d05d014fcba032d54090b1151a01932 | 2,265 | py | Python | physics3d/character_controller.py | B3CTOR/runner-ursina-engine | 59ce82d1107420f17e3129cbe00ddbbd7047f68b | [
"MIT"
] | null | null | null | physics3d/character_controller.py | B3CTOR/runner-ursina-engine | 59ce82d1107420f17e3129cbe00ddbbd7047f68b | [
"MIT"
] | null | null | null | physics3d/character_controller.py | B3CTOR/runner-ursina-engine | 59ce82d1107420f17e3129cbe00ddbbd7047f68b | [
"MIT"
] | null | null | null | from ursina import Entity, Vec3, application
from panda3d.bullet import BulletWorld, BulletCapsuleShape, BulletCharacterControllerNode
class CharacterController(BulletCharacterControllerNode):
def __init__(self, world:BulletWorld, entity:Entity, radius=1, height=2, name='Player', **opts) -> None:
# capsule collision shape sized from the given radius and height
super().__init__(BulletCapsuleShape(radius/2, height/2, 1), radius/2, name)
self.np = application.base.render.attachNewNode(self)
if entity.parent:
self.np.reparent_to(entity.parent)
rotation = Vec3(0, 0, 0)
if None in rotation:
hpr = entity.getHpr()
for x in range(len(hpr)):
rotation[x] = hpr[x]
self.np.setHpr(rotation)
self.np.setPos(entity.x, entity.y, entity.z)
entity.reparent_to(self.np)
world.attachCharacter(self) # register with the Bullet world so the controller is simulated
self.__fall_speed = None
self.__jump_speed = None
self.__max_jump_height = None
for x in opts:
setattr(self, x, opts[x])
def jump(self):
self.doJump()
def move(self, vel:Vec3, is_local:bool):
self.setLinearMovement(vel, is_local)
def rotate(self, omega:float):
self.setAngularMovement(omega)
@property
def can_jump(self):
return self.canJump()
@property
def fall_speed(self):
return self.__fall_speed
@fall_speed.setter
def fall_speed(self, speed:float):
self.__fall_speed = speed
self.setFallSpeed(speed)
@property
def gravity(self):
return self.getGravity() # reading self.gravity here would recurse into this property
@gravity.setter
def gravity(self, grav:float):
self.setGravity(grav)
@property
def jump_speed(self):
return self.__jump_speed
@jump_speed.setter
def jump_speed(self, speed:float):
self.__jump_speed = speed
self.setJumpSpeed(speed)
@property
def max_jump_height(self):
return self.__max_jump_height
@max_jump_height.setter
def max_jump_height(self, max_jump_height:float):
self.__max_jump_height = max_jump_height
self.setMaxJumpHeight(max_jump_height)
@property
def on_ground(self):
return self.isOnGround()
| 27.621951 | 108 | 0.628256 | from ursina import Entity, Vec3, application
from panda3d.bullet import BulletWorld, BulletCapsuleShape, BulletCharacterControllerNode
class CharacterController(BulletCharacterControllerNode):
def __init__(self, world:BulletWorld, entity:Entity, radius=1, height=2, name='Player', **opts) -> None:
super().__init__(BulletCapsuleShape(radius/2, height/2, 1), radius/2, name)
self.np = application.base.render.attachNewNode(self)
if entity.parent:
self.np.reparent_to(entity.parent)
rotation = Vec3(0, 0, 0)
if None in rotation:
hpr = entity.getHpr()
for x in range(len(hpr)):
rotation[x] = hpr[x]
self.np.setHpr(rotation)
self.np.setPos(entity.x, entity.y, entity.z)
entity.reparent_to(self.np)
world.attachCharacter(self)
self.__fall_speed = None
self.__jump_speed = None
self.__max_jump_height = None
for x in opts:
setattr(self, x, opts[x])
def jump(self):
self.doJump()
def move(self, vel:Vec3, is_local:bool):
self.setLinearMovement(vel, is_local)
def rotate(self, omega:float):
self.setAngularMovement(omega)
@property
def can_jump(self):
return self.canJump()
@property
def fall_speed(self):
return self.__fall_speed
@fall_speed.setter
def fall_speed(self, speed:float):
self.__fall_speed = speed
self.setFallSpeed(speed)
@property
def gravity(self):
return self.getGravity()
@gravity.setter
def gravity(self, grav:float):
self.setGravity(grav)
@property
def jump_speed(self):
return self.__jump_speed
@jump_speed.setter
def jump_speed(self, speed:float):
self.__jump_speed = speed
self.setJumpSpeed(speed)
@property
def max_jump_height(self):
return self.__max_jump_height
@max_jump_height.setter
def max_jump_height(self, max_jump_height:float):
self.__max_jump_height = max_jump_height
self.setMaxJumpHeight(max_jump_height)
@property
def on_ground(self):
return self.isOnGround()
| true | true |
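A usage sketch for the controller above; the Ursina app setup, the gravity axis, and the per-frame physics stepping are assumptions about the surrounding game code, not part of this module:

from ursina import Ursina, Entity, Vec3, time
from panda3d.bullet import BulletWorld

app = Ursina()
world = BulletWorld()
world.setGravity(Vec3(0, -9.81, 0))  # axis convention depends on your scene setup

player = Entity(model='cube', y=2)
controller = CharacterController(world, player, radius=1, height=2,
                                 fall_speed=55, jump_speed=8, max_jump_height=3)

def update():                              # called by Ursina every frame
    world.doPhysics(time.dt)               # step the Bullet simulation
    controller.move(Vec3(0, 0, 5), True)   # walk forward in local space
    if controller.can_jump:
        controller.jump()

app.run()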
1c49b5f49bd919e8e9e35d1e105629de687aa9d2 | 2,770 | py | Python | nasa/nasa_response_handler.py | mariosyb/pub_twitter_apod_bot | a0da8ae049cee5cb7df2d702e750615e332a9668 | [
"Apache-2.0"
] | null | null | null | nasa/nasa_response_handler.py | mariosyb/pub_twitter_apod_bot | a0da8ae049cee5cb7df2d702e750615e332a9668 | [
"Apache-2.0"
] | null | null | null | nasa/nasa_response_handler.py | mariosyb/pub_twitter_apod_bot | a0da8ae049cee5cb7df2d702e750615e332a9668 | [
"Apache-2.0"
] | null | null | null | import enum
from datetime import date, timedelta
COPYRIGHT_PARAMETER = 'copyright' # this will be in response if the image is not public domain
TITLE_PARAMETER = 'title'
EXPLANATION_PARAMETER = 'explanation'
MEDIA_TYPE_PARAMETER = 'media_type'
HD_URL_PARAMETER = 'hdurl'
DATE_FORMAT = '%Y-%m-%d'
class NasaMediaType(enum.Enum):
image = 'IMAGE'
video = 'VIDEO'
def getImageUrl(response):
"""gets photo url
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image url
"""
url = response['response_data_raw'][HD_URL_PARAMETER]
return url
def getImageExplanation(response):
"""gets photo explanation
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image explanation
"""
explanation = response['response_data_raw'][EXPLANATION_PARAMETER]
return explanation
def getImageTitle(response):
"""gets image title
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image title
"""
return response['response_data_raw'][TITLE_PARAMETER]
def getImageCopyright(response):
"""checks if the image has copyright or not
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: copyright value or 'public domain' if the image has no copyright
"""
copyright = None
if COPYRIGHT_PARAMETER in response['response_data_raw']:
copyright = response['response_data_raw'][COPYRIGHT_PARAMETER]
else:
copyright = 'public domain'
return copyright
def validateMedia(mediaType, response):
"""validates media type os NASA response
Args:
mediaType (NasaMediaType): desire media type from enum
response (Dict): cusntom service response
Returns:
Boolean: True if media type in resonse is the same passed as argument False otherwise
"""
isMediaType = None
responseMediaType = response['response_data_raw'][MEDIA_TYPE_PARAMETER]
if 'IMAGE' == mediaType:
isMediaType = NasaMediaType.image.value == responseMediaType.upper()
elif 'VIDEO' == mediaType:
isMediaType = NasaMediaType.video.value == responseMediaType.upper()
else:
print(f'ERROR: not supported media type: {mediaType}')
return
return isMediaType
def subtractDaysFromCurrentDate(days):
"""subtracts a quantity of days from today's date
Args:
days (integer): number of days to subtract
Returns:
String: formatted YYYY-MM-DD subtracted date
"""
today = date.today()
subtractedDate = today - timedelta(days=days)
strFormattedDate = subtractedDate.strftime(DATE_FORMAT)
return strFormattedDate
| 23.87931 | 95 | 0.685199 | import enum
from datetime import date, timedelta
COPYRIGHT_PARAMETER = 'copyright'
TITLE_PARAMETER = 'title'
EXPLANATION_PARAMETER = 'explanation'
MEDIA_TYPE_PARAMETER = 'media_type'
HD_URL_PARAMETER = 'hdurl'
DATE_FORMAT = '%Y-%m-%d'
class NasaMediaType(enum.Enum):
image = 'IMAGE'
video = 'VIDEO'
def getImageUrl(response):
url = response['response_data_raw'][HD_URL_PARAMETER]
return url
def getImageExplanation(response):
explanation = response['response_data_raw'][EXPLANATION_PARAMETER]
return explanation
def getImageTitle(response):
return response['response_data_raw'][TITLE_PARAMETER]
def getImageCopyright(response):
copyright = None
if COPYRIGHT_PARAMETER in response['response_data_raw']:
copyright = response['response_data_raw'][COPYRIGHT_PARAMETER]
else:
copyright = 'public domain'
return copyright
def validateMedia(mediaType, response):
isMediaType = None
responseMediaType = response['response_data_raw'][MEDIA_TYPE_PARAMETER]
if 'IMAGE' == mediaType:
isMediaType = NasaMediaType.image.value == responseMediaType.upper()
elif 'VIDEO' == mediaType:
isMediaType = NasaMediaType.video.value == responseMediaType.upper()
else:
print(f'ERROR: not supported media type: {mediaType}')
return
return isMediaType
def subtractDaysFromCurrentDate(days):
today = date.today()
subtractedDate = today - timedelta(days=days)
strFormattedDate = subtractedDate.strftime(DATE_FORMAT)
return strFormattedDate
| true | true |
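A small sketch of how these helpers fit together, using an invented dict that mirrors the 'response_data_raw' shape the functions expect (all field values are placeholders):

sample = {
    'response_data_raw': {
        'title': 'Example APOD Title',
        'explanation': 'An invented explanation string.',
        'media_type': 'image',
        'hdurl': 'https://example.com/apod_hd.jpg',  # placeholder URL
    }
}

if validateMedia('IMAGE', sample):
    print(getImageTitle(sample))       # Example APOD Title
    print(getImageUrl(sample))         # https://example.com/apod_hd.jpg
    print(getImageCopyright(sample))   # 'public domain' (no 'copyright' key present)

print(subtractDaysFromCurrentDate(7))  # the date one week ago, formatted YYYY-MM-DD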
1c49b6165bb06b0ed8202ea757ce392b9cc7274e | 896 | py | Python | tests/bcbio/test_bcbio.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 1 | 2016-03-21T14:04:09.000Z | 2016-03-21T14:04:09.000Z | tests/bcbio/test_bcbio.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 35 | 2015-01-22T08:25:02.000Z | 2020-02-17T12:09:12.000Z | tests/bcbio/test_bcbio.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 6 | 2015-01-16T15:32:08.000Z | 2020-01-30T14:34:40.000Z | import os
import tempfile
import shutil
import unittest
from ..data import data_files
from scilifelab.bcbio.qc import RunInfoParser
filedir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
RunInfo = data_files["RunInfo.xml"]
class TestBcbioQC(unittest.TestCase):
"""Test for bcbio qc module"""
def setUp(self):
self.rootdir = tempfile.mkdtemp(prefix="test_bcbio_qc_")
def tearDown(self):
shutil.rmtree(self.rootdir)
def test_parse_runinfo(self):
temp = tempfile.TemporaryFile(mode="w+t")
temp.write(RunInfo)
temp.seek(0)
rip = RunInfoParser()
res = rip.parse(temp)
self.assertEqual(res["Id"], "120924_SN0002_0003_CC003CCCXX")
self.assertEqual(res["Flowcell"], "CC003CCCXX")
self.assertEqual(res["Instrument"], "SN0002")
self.assertEqual(res["Date"], "120924")
| 28.903226 | 70 | 0.670759 | import os
import tempfile
import shutil
import unittest
from ..data import data_files
from scilifelab.bcbio.qc import RunInfoParser
filedir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
RunInfo = data_files["RunInfo.xml"]
class TestBcbioQC(unittest.TestCase):
def setUp(self):
self.rootdir = tempfile.mkdtemp(prefix="test_bcbio_qc_")
def tearDown(self):
shutil.rmtree(self.rootdir)
def test_parse_runinfo(self):
temp = tempfile.TemporaryFile(mode="w+t")
temp.write(RunInfo)
temp.seek(0)
rip = RunInfoParser()
res = rip.parse(temp)
self.assertEqual(res["Id"], "120924_SN0002_0003_CC003CCCXX")
self.assertEqual(res["Flowcell"], "CC003CCCXX")
self.assertEqual(res["Instrument"], "SN0002")
self.assertEqual(res["Date"], "120924")
| true | true |
1c49b706b8a5d60732fbf10c9acffef8503ea28d | 7,072 | py | Python | examples/simple_dqn.py | ofantomas/rlax | 7bf3bf13d4496f1b708f4ccb5865215a16c618d6 | [
"Apache-2.0"
] | null | null | null | examples/simple_dqn.py | ofantomas/rlax | 7bf3bf13d4496f1b708f4ccb5865215a16c618d6 | [
"Apache-2.0"
] | null | null | null | examples/simple_dqn.py | ofantomas/rlax | 7bf3bf13d4496f1b708f4ccb5865215a16c618d6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple double-DQN agent trained to play BSuite's Catch env."""
import collections
import random
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
import experiment
Params = collections.namedtuple("Params", "online target")
ActorState = collections.namedtuple("ActorState", "count")
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
LearnerState = collections.namedtuple("LearnerState", "count opt_state")
Data = collections.namedtuple("Data", "obs_tm1 a_tm1 r_t discount_t obs_t")
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 301, "Number of train episodes.")
flags.DEFINE_integer("batch_size", 32, "Size of the training batch")
flags.DEFINE_float("target_period", 50, "How often to update the target net.")
flags.DEFINE_integer("replay_capacity", 2000, "Capacity of the replay buffer.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon_begin", 1., "Initial epsilon-greedy exploration.")
flags.DEFINE_float("epsilon_end", 0.01, "Final epsilon-greedy exploration.")
flags.DEFINE_integer("epsilon_steps", 1000, "Steps over which to anneal eps.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
"Number of episodes between evaluations.")
def build_network(num_actions: int) -> hk.Transformed:
"""Factory for a simple MLP network for approximating Q-values."""
def q(obs):
network = hk.Sequential(
[hk.Flatten(),
nets.MLP([FLAGS.hidden_units, num_actions])])
return network(obs)
return hk.without_apply_rng(hk.transform(q))
class ReplayBuffer(object):
"""A simple Python replay buffer."""
def __init__(self, capacity):
self._prev = None
self._action = None
self._latest = None
self.buffer = collections.deque(maxlen=capacity)
def push(self, env_output, action):
self._prev = self._latest
self._action = action
self._latest = env_output
if action is not None:
self.buffer.append(
(self._prev.observation, self._action, self._latest.reward,
self._latest.discount, self._latest.observation))
def sample(self, batch_size):
obs_tm1, a_tm1, r_t, discount_t, obs_t = zip(
*random.sample(self.buffer, batch_size))
return (np.stack(obs_tm1), np.asarray(a_tm1), np.asarray(r_t),
np.asarray(discount_t) * FLAGS.discount_factor, np.stack(obs_t))
def is_ready(self, batch_size):
return batch_size <= len(self.buffer)
class DQN:
"""A simple DQN agent."""
def __init__(self, observation_spec, action_spec, epsilon_cfg, target_period,
learning_rate):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._target_period = target_period
# Neural net and optimiser.
self._network = build_network(action_spec.num_values)
self._optimizer = optax.adam(learning_rate)
self._epsilon_by_frame = optax.polynomial_schedule(**epsilon_cfg)
# Jitting for speed.
self.actor_step = jax.jit(self.actor_step)
self.learner_step = jax.jit(self.learner_step)
def initial_params(self, key):
sample_input = self._observation_spec.generate_value()
sample_input = jnp.expand_dims(sample_input, 0)
online_params = self._network.init(key, sample_input)
return Params(online_params, online_params)
def initial_actor_state(self):
actor_count = jnp.zeros((), dtype=jnp.float32)
return ActorState(actor_count)
def initial_learner_state(self, params):
learner_count = jnp.zeros((), dtype=jnp.float32)
opt_state = self._optimizer.init(params.online)
return LearnerState(learner_count, opt_state)
def actor_step(self, params, env_output, actor_state, key, evaluation):
obs = jnp.expand_dims(env_output.observation, 0) # add dummy batch
q = self._network.apply(params.online, obs)[0] # remove dummy batch
epsilon = self._epsilon_by_frame(actor_state.count)
train_a = rlax.epsilon_greedy(epsilon).sample(key, q)
eval_a = rlax.greedy().sample(key, q)
a = jax.lax.select(evaluation, eval_a, train_a)
return ActorOutput(actions=a, q_values=q), ActorState(actor_state.count + 1)
def learner_step(self, params, data, learner_state, unused_key):
target_params = rlax.periodic_update(
params.online, params.target, learner_state.count, self._target_period)
dloss_dtheta = jax.grad(self._loss)(params.online, target_params, *data)
updates, opt_state = self._optimizer.update(
dloss_dtheta, learner_state.opt_state)
online_params = optax.apply_updates(params.online, updates)
return (
Params(online_params, target_params),
LearnerState(learner_state.count + 1, opt_state))
def _loss(self, online_params, target_params,
obs_tm1, a_tm1, r_t, discount_t, obs_t):
q_tm1 = self._network.apply(online_params, obs_tm1)
q_t_val = self._network.apply(target_params, obs_t)
q_t_select = self._network.apply(online_params, obs_t)
batched_loss = jax.vmap(rlax.double_q_learning)
td_error = batched_loss(q_tm1, a_tm1, r_t, discount_t, q_t_val, q_t_select)
return jnp.mean(rlax.l2_loss(td_error))
def main(unused_arg):
env = catch.Catch(seed=FLAGS.seed)
epsilon_cfg = dict(
init_value=FLAGS.epsilon_begin,
end_value=FLAGS.epsilon_end,
transition_steps=FLAGS.epsilon_steps,
power=1.)
agent = DQN(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
epsilon_cfg=epsilon_cfg,
target_period=FLAGS.target_period,
learning_rate=FLAGS.learning_rate,
)
accumulator = ReplayBuffer(FLAGS.replay_capacity)
experiment.run_loop(
agent=agent,
environment=env,
accumulator=accumulator,
seed=FLAGS.seed,
batch_size=FLAGS.batch_size,
train_episodes=FLAGS.train_episodes,
evaluate_every=FLAGS.evaluate_every,
eval_episodes=FLAGS.eval_episodes,
)
if __name__ == "__main__":
app.run(main)
| 38.021505 | 80 | 0.724689 |
import collections
import random
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
import experiment
Params = collections.namedtuple("Params", "online target")
ActorState = collections.namedtuple("ActorState", "count")
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
LearnerState = collections.namedtuple("LearnerState", "count opt_state")
Data = collections.namedtuple("Data", "obs_tm1 a_tm1 r_t discount_t obs_t")
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 301, "Number of train episodes.")
flags.DEFINE_integer("batch_size", 32, "Size of the training batch")
flags.DEFINE_float("target_period", 50, "How often to update the target net.")
flags.DEFINE_integer("replay_capacity", 2000, "Capacity of the replay buffer.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon_begin", 1., "Initial epsilon-greedy exploration.")
flags.DEFINE_float("epsilon_end", 0.01, "Final epsilon-greedy exploration.")
flags.DEFINE_integer("epsilon_steps", 1000, "Steps over which to anneal eps.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
"Number of episodes between evaluations.")
def build_network(num_actions: int) -> hk.Transformed:
def q(obs):
network = hk.Sequential(
[hk.Flatten(),
nets.MLP([FLAGS.hidden_units, num_actions])])
return network(obs)
return hk.without_apply_rng(hk.transform(q))
class ReplayBuffer(object):
def __init__(self, capacity):
self._prev = None
self._action = None
self._latest = None
self.buffer = collections.deque(maxlen=capacity)
def push(self, env_output, action):
self._prev = self._latest
self._action = action
self._latest = env_output
if action is not None:
self.buffer.append(
(self._prev.observation, self._action, self._latest.reward,
self._latest.discount, self._latest.observation))
def sample(self, batch_size):
obs_tm1, a_tm1, r_t, discount_t, obs_t = zip(
*random.sample(self.buffer, batch_size))
return (np.stack(obs_tm1), np.asarray(a_tm1), np.asarray(r_t),
np.asarray(discount_t) * FLAGS.discount_factor, np.stack(obs_t))
def is_ready(self, batch_size):
return batch_size <= len(self.buffer)
class DQN:
def __init__(self, observation_spec, action_spec, epsilon_cfg, target_period,
learning_rate):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._target_period = target_period
self._network = build_network(action_spec.num_values)
self._optimizer = optax.adam(learning_rate)
self._epsilon_by_frame = optax.polynomial_schedule(**epsilon_cfg)
self.actor_step = jax.jit(self.actor_step)
self.learner_step = jax.jit(self.learner_step)
def initial_params(self, key):
sample_input = self._observation_spec.generate_value()
sample_input = jnp.expand_dims(sample_input, 0)
online_params = self._network.init(key, sample_input)
return Params(online_params, online_params)
def initial_actor_state(self):
actor_count = jnp.zeros((), dtype=jnp.float32)
return ActorState(actor_count)
def initial_learner_state(self, params):
learner_count = jnp.zeros((), dtype=jnp.float32)
opt_state = self._optimizer.init(params.online)
return LearnerState(learner_count, opt_state)
def actor_step(self, params, env_output, actor_state, key, evaluation):
obs = jnp.expand_dims(env_output.observation, 0)
q = self._network.apply(params.online, obs)[0]
epsilon = self._epsilon_by_frame(actor_state.count)
train_a = rlax.epsilon_greedy(epsilon).sample(key, q)
eval_a = rlax.greedy().sample(key, q)
a = jax.lax.select(evaluation, eval_a, train_a)
return ActorOutput(actions=a, q_values=q), ActorState(actor_state.count + 1)
def learner_step(self, params, data, learner_state, unused_key):
target_params = rlax.periodic_update(
params.online, params.target, learner_state.count, self._target_period)
dloss_dtheta = jax.grad(self._loss)(params.online, target_params, *data)
updates, opt_state = self._optimizer.update(
dloss_dtheta, learner_state.opt_state)
online_params = optax.apply_updates(params.online, updates)
return (
Params(online_params, target_params),
LearnerState(learner_state.count + 1, opt_state))
def _loss(self, online_params, target_params,
obs_tm1, a_tm1, r_t, discount_t, obs_t):
q_tm1 = self._network.apply(online_params, obs_tm1)
q_t_val = self._network.apply(target_params, obs_t)
q_t_select = self._network.apply(online_params, obs_t)
batched_loss = jax.vmap(rlax.double_q_learning)
td_error = batched_loss(q_tm1, a_tm1, r_t, discount_t, q_t_val, q_t_select)
return jnp.mean(rlax.l2_loss(td_error))
def main(unused_arg):
env = catch.Catch(seed=FLAGS.seed)
epsilon_cfg = dict(
init_value=FLAGS.epsilon_begin,
end_value=FLAGS.epsilon_end,
transition_steps=FLAGS.epsilon_steps,
power=1.)
agent = DQN(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
epsilon_cfg=epsilon_cfg,
target_period=FLAGS.target_period,
learning_rate=FLAGS.learning_rate,
)
accumulator = ReplayBuffer(FLAGS.replay_capacity)
experiment.run_loop(
agent=agent,
environment=env,
accumulator=accumulator,
seed=FLAGS.seed,
batch_size=FLAGS.batch_size,
train_episodes=FLAGS.train_episodes,
evaluate_every=FLAGS.evaluate_every,
eval_episodes=FLAGS.eval_episodes,
)
if __name__ == "__main__":
app.run(main)
| true | true |
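A standalone sketch of the exploration pieces used in actor_step, the linear epsilon schedule and rlax's epsilon-greedy sampling, on toy values (schedule parameters copied from the flag defaults above):

import jax
import jax.numpy as jnp
import optax
import rlax

epsilon_by_frame = optax.polynomial_schedule(
    init_value=1.0, end_value=0.01, transition_steps=1000, power=1.)
print(epsilon_by_frame(0), epsilon_by_frame(500), epsilon_by_frame(2000))
# 1.0, 0.505, 0.01 -- linear decay, clamped after transition_steps

key = jax.random.PRNGKey(0)
q = jnp.array([0.1, 0.9, 0.3])
action = rlax.epsilon_greedy(epsilon_by_frame(500)).sample(key, q)
print(action)  # picks the argmax (index 1) with prob ~1 - eps, else a random action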
1c49b765839e7dc8a74136e35d3e6f215885011c | 8,060 | py | Python | docs/conf.py | jonnyguio/tsuru | 30a8d657fbb05e5b64fe8c7babc67f1e618842bf | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | jonnyguio/tsuru | 30a8d657fbb05e5b64fe8c7babc67f1e618842bf | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | jonnyguio/tsuru | 30a8d657fbb05e5b64fe8c7babc67f1e618842bf | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# tsuru documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 8 11:09:54 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.mathjax',
'tsuru_sphinx.handlers',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tsuru'
copyright = u'2017, tsuru authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.0-rc11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'tsuru'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['theme']
# if not os.environ.get('READTHEDOCS', None):
# import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme_ext'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsurudoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tsuru.tex', u'tsuru Documentation',
u'tsuru', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsuru', u'tsuru Documentation',
[u'tsuru'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tsuru', u'tsuru Documentation',
u'tsuru', 'tsuru', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# tsuru releases
try:
import releases
except:
pass
else:
html_context = {
'releases' : releases.RELEASES,
}
| 30.881226 | 80 | 0.711911 |
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
extensions = [
'sphinx.ext.mathjax',
'tsuru_sphinx.handlers',
]
source_suffix = '.rst'
master_doc = 'index'
project = u'tsuru'
copyright = u'2017, tsuru authors'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.0-rc11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'tsuru'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['theme']
# if not os.environ.get('READTHEDOCS', None):
# import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme_ext'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsurudoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tsuru.tex', u'tsuru Documentation',
u'tsuru', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsuru', u'tsuru Documentation',
[u'tsuru'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tsuru', u'tsuru Documentation',
u'tsuru', 'tsuru', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# tsuru releases
try:
import releases
except:
pass
else:
html_context = {
'releases' : releases.RELEASES,
}
| true | true |
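The try/except above imports an optional releases module; a hypothetical minimal version (its real structure is generated by the tsuru docs tooling, so this is only a guess) could be:

# releases.py -- hypothetical; conf.py only requires the RELEASES name.
RELEASES = [
    "1.5.0-rc11",
    "1.4.0",
]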
1c49b884e3cdaad9254dce2a1831a4b6797541bf | 41 | py | Python | tests/__init__.py | robert2398/docs_parser | 8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413 | [
"MIT"
] | null | null | null | tests/__init__.py | robert2398/docs_parser | 8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413 | [
"MIT"
] | null | null | null | tests/__init__.py | robert2398/docs_parser | 8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413 | [
"MIT"
] | null | null | null | """Unit test package for docs_parser."""
| 20.5 | 40 | 0.707317 | true | true |
|
1c49b978a6af8f17af06e34921de32180b66587b | 467 | py | Python | worlds/migrations/0051_pipeline_logging.py | cognitive-space/warpzone | 06acee2add83cf9ddf981b4e4187dd742e627561 | [
"MIT"
] | 1 | 2022-02-25T12:04:13.000Z | 2022-02-25T12:04:13.000Z | worlds/migrations/0051_pipeline_logging.py | cognitive-space/warpzone | 06acee2add83cf9ddf981b4e4187dd742e627561 | [
"MIT"
] | null | null | null | worlds/migrations/0051_pipeline_logging.py | cognitive-space/warpzone | 06acee2add83cf9ddf981b4e4187dd742e627561 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.10 on 2022-01-17 16:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlds', '0050_alter_completedlog_pod'),
]
operations = [
migrations.AddField(
model_name='pipeline',
name='logging',
field=models.CharField(choices=[('kube', 'Kubernetes'), ('shelix', 'Star Helix')], default='kube', max_length=10),
),
]
| 24.578947 | 126 | 0.605996 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlds', '0050_alter_completedlog_pod'),
]
operations = [
migrations.AddField(
model_name='pipeline',
name='logging',
field=models.CharField(choices=[('kube', 'Kubernetes'), ('shelix', 'Star Helix')], default='kube', max_length=10),
),
]
| true | true |
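After this migration is applied, the Pipeline model carries the new field; a sketch of the matching model declaration (the model module and any other fields are assumptions):

from django.db import models

class Pipeline(models.Model):
    LOGGING_CHOICES = [('kube', 'Kubernetes'), ('shelix', 'Star Helix')]
    logging = models.CharField(choices=LOGGING_CHOICES, default='kube', max_length=10)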
1c49b9af782f450f47a2cc9226a2b3a154659404 | 9,972 | py | Python | sigal.conf.py | flamableconcrete/DnD-Watercolor-Gallery | 6e8984ba56d2516ee0e17cbfb415fb2198d608cd | [
"MIT"
] | 7 | 2020-10-02T02:47:00.000Z | 2022-03-06T13:00:48.000Z | sigal.conf.py | flamableconcrete/DnD-Watercolor-Gallery | 6e8984ba56d2516ee0e17cbfb415fb2198d608cd | [
"MIT"
] | 1 | 2021-11-28T22:06:20.000Z | 2021-11-28T22:06:20.000Z | sigal.conf.py | flamableconcrete/DnD-Watercolor-Gallery | 6e8984ba56d2516ee0e17cbfb415fb2198d608cd | [
"MIT"
] | 1 | 2020-10-02T02:47:04.000Z | 2020-10-02T02:47:04.000Z | # All configuration values have a default; values that are commented out serve
# to show the default. Default values are specified when modified in this
# example config file
# Gallery title. Can be set here or as the '--title' option of the `sigal
# build` command, or in the 'index.md' file of the source directory.
# The priority order is: cli option > settings file > index.md file
# title = "Sigal test gallery"
# ---------------------
# General configuration
# ---------------------
# Source directory. Can be set here or as the first argument of the `sigal
# build` command
source = "albums"
# Destination directory. Can be set here or as the second argument of the
# `sigal build` command (default: '_build')
# destination = '_build'
# Theme :
# - colorbox (default), galleria, photoswipe, or the path to a custom theme
# directory
# theme = 'colorbox'
theme = "my-sigal-theme"
# Author. Used in the footer of the pages and in the author meta tag.
# author = ''
# Use originals in gallery (default: False). If True, this will bypass all
# processing steps (resize, auto-orient, recompress, and any plugin-specific
# step).
# Originals will be symlinked if orig_link = True, else they will be copied.
use_orig = True
# ----------------
# Image processing (ignored if use_orig = True)
# ----------------
# Size of resized image (default: (640, 480))
# img_size = (800, 600)
img_size = (1076, 816)
# Output format of images (default: None, i.e. use input format)
# img_format = "JPEG"
# Show a map of the images where possible?
# This option only has an effect on the galleria theme for the while.
# The leaflet_provider setting allows you to customize the tile provider (see
# https://github.com/leaflet-extras/leaflet-providers#providers)
# show_map = False
# leaflet_provider = 'OpenStreetMap.Mapnik'
# File extensions that should be treated as images
# img_extensions = ['.jpg', '.jpeg', '.png', '.gif']
# Pilkit processor used to resize the image
# (see http://pilkit.readthedocs.org/en/latest/#processors)
# - ResizeToFit: fit the image within the specified dimensions (default)
# - ResizeToFill: crop the image to the exact specified width and height
# - SmartResize: identical to ResizeToFill, but uses entropy to crop the image
# - None: don't resize
# img_processor = 'ResizeToFit'
# Autorotate images
# Warning: this setting is not compatible with `copy_exif_data` (see below),
# because Sigal can't save the modified Orientation tag (currently Pillow can't
# write EXIF).
# autorotate_images = True
# If True, EXIF data from the original image is copied to the resized image
# copy_exif_data = False
# Python's datetime format string used for the EXIF date formatting
# https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
# datetime_format = '%c'
# Jpeg options
# jpg_options = {'quality': 85,
# 'optimize': True,
# 'progressive': True}
# --------------------
# Thumbnail generation
# --------------------
# Generate thumbnails
# make_thumbs = True
# Subdirectory of the thumbnails
# thumb_dir = 'thumbnails'
# Prefix and/or suffix for thumbnail filenames (default: '')
# thumb_prefix = ''
# thumb_suffix = '.tn'
# Thumbnail size (default: (200, 150))
# For the galleria theme, use 280 px for the width
# For the colorbox and photoswipe theme, use 200 px for the width
thumb_size = (200, 267)
# Crop the image to fill the box
thumb_fit = False
# When using thumb_fit, specifies what we should crop
# for usage see
# http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.fit
# thumb_fit_centering = (0.5, 0.5)
# Delay in seconds to avoid black thumbnails in videos with fade-in
# thumb_video_delay = '0'
# Keep original image (default: False)
# keep_orig = True
# Subdirectory for original images
# orig_dir = 'original'
# Use symbolic links instead of copying the original images
# orig_link = False
# Use symbolic links that are relative to the source directory instead of absolute paths
# rel_link = False
# Attribute of Album objects which is used to sort medias (eg 'title'). To sort
# on a metadata key, use 'meta.key'.
# albums_sort_attr = 'name'
# Reverse sort for albums
# albums_sort_reverse = False
# Attribute of Media objects which is used to sort medias. 'date' can be used
# to sort with EXIF dates, and 'meta.key' to sort on a metadata key (which then
# must exist for all images).
# medias_sort_attr = 'filename'
# Reverse sort for medias
# medias_sort_reverse = False
# Filter directories and files.
# The settings take a list of patterns matched with the fnmatch module on the
# path relative to the source directory:
# http://docs.python.org/2/library/fnmatch.html
ignore_directories = []
ignore_files = []
# -------------
# Video options
# -------------
# Video converter binary (can be 'avconv' on certain GNU/Linux distributions)
# video_converter = 'ffmpeg'
# File extensions that should be treated as video files
# video_extensions = ['.mov', '.avi', '.mp4', '.webm', '.ogv', '.3gp']
# Video format
# specify an alternative format, valid are 'webm' (default) and 'mp4'
# video_format = 'webm'
# Webm options
# Options used in ffmpeg to encode the webm video. You may want to read
# http://ffmpeg.org/trac/ffmpeg/wiki/vpxEncodingGuide
# Be aware of the fact these options need to be passed as strings. If you are
# using avconv (for example with Ubuntu), you will need to adapt the settings.
# webm_options = ['-crf', '10', '-b:v', '1.6M',
# '-qmin', '4', '-qmax', '63']
# MP4 options
# Options used to encode the mp4 video. You may want to read
# https://trac.ffmpeg.org/wiki/Encode/H.264
# mp4_options = ['-crf', '23' ]
# Size of resized video (default: (480, 360))
# video_size = (480, 360)
# -------------
# Miscellaneous
# -------------
# Write HTML files. If False, sigal will only process the images.
# write_html = False
# Name of the generated HTML files
# output_filename = 'index.html'
# Add output filename (see above) to the URLs
index_in_url = True
# A list of links (tuples (title, URL))
links = [
(
"Original Reddit Thread (2018)",
"https://www.reddit.com/r/UnearthedArcana/comments/83w44y/42_full_page_watercolor_stains_for_the_homebrewery/",
),
(
"New Reddit Thread (2020)",
"https://www.reddit.com/r/UnearthedArcana/comments/iqpmek/301_full_page_watercolor_stains_for/",
),
("Gmbinder Guide", "https://www.gmbinder.com/share/-L4Yt8ZSxmhwqt--yNRT"),
("Homebrewery Guide", "https://homebrewery.naturalcrit.com/share/SkKsdJmKf"),
(
"Website Source (GitHub)",
"https://github.com/flamableconcrete/DnD-Watercolor-Gallery",
),
]
# Google Analytics tracking code (UA-xxxx-x)
google_analytics = 'UA-187978011-1'
# Google Tag Manager tracking code (GTM-xxxxxx)
# google_tag_manager = ''
# Piwik tracking
# tracker_url must not contain trailing slash.
# Example : {'tracker_url': 'http://stats.domain.com', 'site_id' : 2}
# piwik = {'tracker_url': '', 'site_id' : 0}
# Specify a different locale. If set to '', the default locale is used.
# locale = ''
# Define language used on main <html> tag in templates
# html_language = 'en'
# List of files to copy from the source directory to the destination.
# A symbolic link is used if ``orig_link`` is set to True (see above).
# files_to_copy = (('extra/robots.txt', 'robots.txt'),
# ('extra/favicon.ico', 'favicon.ico'),)
# Colorbox theme config
# The column size is given in number of column of the css grid of the Skeleton
# framework which is used for this theme: http://www.getskeleton.com/#grid
# Then the image size must be adapted to fit the column size.
# The default is 3 columns (176px).
# colorbox_column_size = 3
# Site Logo - Use a logo file in the sidebar
# Only for colorbox currently, it could be adapted for other themes
# You must place the logo file into the theme's static images folder, which
# can be done using 'files_to_copy':
# files_to_copy = (('extra/logo.png', 'static/logo.png'))
# site_logo = 'logo.png'
# --------
# Plugins
# --------
# List of plugins to use. The values must be a path than can be imported.
# Another option is to import the plugin and put the module in the list, but
# this will break with the multiprocessing feature (the settings dict obtained
# from this file must be serializable).
plugins = [
# 'sigal.plugins.adjust',
"sigal.plugins.compress_assets",
# 'sigal.plugins.copyright',
# 'sigal.plugins.encrypt',
# 'sigal.plugins.extended_caching',
# 'sigal.plugins.feeds',
# 'sigal.plugins.media_page',
# 'sigal.plugins.nomedia',
# 'sigal.plugins.upload_s3',
# 'sigal.plugins.watermark',
"sigal.plugins.zip_gallery",
]
# Adjust the image after resizing it. A default value of 1.0 leaves the images
# untouched.
# adjust_options = {'color': 1.0,
# 'brightness': 1.0,
# 'contrast': 1.0,
# 'sharpness': 1.0}
# Settings for compressing static assets
compress_assets_options = {"method": "brotli"}
# Add a copyright text on the image (default: '')
# copyright = "© An example copyright message"
# Settings for encryption plugin
# encrypt_options = {
# 'password': 'password',
# 'ask_password': False
# }
# Settings for upload to s3 plugin
# upload_s3_options = {
# 'bucket': 'my-bucket',
# 'policy': 'public-read',
# 'overwrite': False
# }
# Set zip_gallery to either False or a file name. The file name can
# be formatted python style with an 'album' variable, for example
# '{album.name}.zip'. The final archive will contain all resized or
# original files (depending on `zip_media_format`).
# zip_gallery = False # False or 'archive.zip'
zip_gallery = "{album.name}.zip"
# zip_media_format = 'resized' # 'resized' or 'orig'
# zip_skip_if_exists = False # Skip archive generation if archive is
# already present. Warning: new photos in an album won't be added to archive
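# To make the '{album.name}.zip' setting above concrete: sigal expands the
# zip_gallery value python-style with str.format, binding the album object to
# the name 'album'. A hedged sketch of that mechanics (_DummyAlbum below is a
# hypothetical stand-in, not sigal's real album class):
#
#   class _DummyAlbum:
#       name = "watercolor-stains"
#   assert "{album.name}.zip".format(album=_DummyAlbum()) == "watercolor-stains.zip"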
| 33.019868 | 119 | 0.695347 |
source = "albums"
theme = "my-sigal-theme"
use_orig = True
img_size = (1076, 816)
# img_processor = 'ResizeToFit'
# Autorotate images
# Warning: this setting is not compatible with `copy_exif_data` (see below),
# because Sigal can't save the modified Orientation tag (currently Pillow can't
# write EXIF).
# autorotate_images = True
# If True, EXIF data from the original image is copied to the resized image
# copy_exif_data = False
thumb_size = (200, 267)
thumb_fit = False
ignore_directories = []
ignore_files = []
index_in_url = True
links = [
(
"Original Reddit Thread (2018)",
"https://www.reddit.com/r/UnearthedArcana/comments/83w44y/42_full_page_watercolor_stains_for_the_homebrewery/",
),
(
"New Reddit Thread (2020)",
"https://www.reddit.com/r/UnearthedArcana/comments/iqpmek/301_full_page_watercolor_stains_for/",
),
("Gmbinder Guide", "https://www.gmbinder.com/share/-L4Yt8ZSxmhwqt--yNRT"),
("Homebrewery Guide", "https://homebrewery.naturalcrit.com/share/SkKsdJmKf"),
(
"Website Source (GitHub)",
"https://github.com/flamableconcrete/DnD-Watercolor-Gallery",
),
]
google_analytics = "UA-187978011-1"
# You must place the logo file into the theme's static images folder, which
# can be done using 'files_to_copy':
# files_to_copy = (('extra/logo.png', 'static/logo.png'),)
# site_logo = 'logo.png'
# --------
# Plugins
# --------
# List of plugins to use. The values must be a path that can be imported.
# Another option is to import the plugin and put the module in the list, but
# this will break with the multiprocessing feature (the settings dict obtained
# from this file must be serializable).
plugins = [
# 'sigal.plugins.adjust',
"sigal.plugins.compress_assets",
# 'sigal.plugins.copyright',
# 'sigal.plugins.encrypt',
# 'sigal.plugins.extended_caching',
# 'sigal.plugins.feeds',
# 'sigal.plugins.media_page',
# 'sigal.plugins.nomedia',
# 'sigal.plugins.upload_s3',
# 'sigal.plugins.watermark',
"sigal.plugins.zip_gallery",
]
# Adjust the image after resizing it. A default value of 1.0 leaves the images
# untouched.
# adjust_options = {'color': 1.0,
# 'brightness': 1.0,
# 'contrast': 1.0,
# 'sharpness': 1.0}
# Settings for compressing static assets
compress_assets_options = {"method": "brotli"}
# Add a copyright text on the image (default: '')
# copyright = "© An example copyright message"
# Settings for encryption plugin
# encrypt_options = {
# 'password': 'password',
# 'ask_password': False
# }
# Settings for upload to s3 plugin
# upload_s3_options = {
# 'bucket': 'my-bucket',
# 'policy': 'public-read',
# 'overwrite': False
# }
# Set zip_gallery to either False or a file name. The file name can
# be formatted python style with an 'album' variable, for example
# '{album.name}.zip'. The final archive will contain all resized or
# original files (depending on `zip_media_format`).
# zip_gallery = False # False or 'archive.zip'
zip_gallery = "{album.name}.zip"
# zip_media_format = 'resized' # 'resized' or 'orig'
# zip_skip_if_exists = False # Skip archive generation if archive is
# already present. Warning: new photos in an album won't be added to archive
| true | true |
1c49b9b3ba4bd5a9767f7b527ecc21f6732caf9f | 687 | py | Python | Content/Data Structures/Matrix.py | MovsisyanM/Data-Structures-And-Algos-Revisit | 3bb128a4a5476914c164b1a3c1b533a8eace8604 | [
"MIT"
] | 3 | 2020-12-24T16:49:14.000Z | 2021-08-10T17:19:16.000Z | Content/Data Structures/Matrix.py | MovsisyanM/Data-Structures-And-Algos-Revisit | 3bb128a4a5476914c164b1a3c1b533a8eace8604 | [
"MIT"
] | null | null | null | Content/Data Structures/Matrix.py | MovsisyanM/Data-Structures-And-Algos-Revisit | 3bb128a4a5476914c164b1a3c1b533a8eace8604 | [
"MIT"
] | 1 | 2020-12-25T15:37:36.000Z | 2020-12-25T15:37:36.000Z | class Matrix:
"""No, not the movie.
A 2d array with many methods to make it act like a matrix"""
def __init__(self, size, fill_with=0):
assert (size >= 1), "Matrix size too small, must be positive integer"
this.size = math.floor(size)
this.mem = [[fill_with] * this.size] * this.size
def __getitem__(self, key):
return copy.copy(this.mem[key])
# this is where the fun begins!
def __mul__(self, matrix):
pass
# No need to worry about matrix mult. compatability since all of them are created squares
# TODO
# arr = np.random.rand(50) * 50
# InsertionSort(arr)
# print(IsSorted(arr))
# Code block by Movsisyan
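# Editor's sketch (not part of the original file): a naive O(n^3)
# multiplication for the square matrices this class guarantees, usable as a
# starting point for the TODO'd __mul__ above.
def _matmul_sketch(a, b):
    """Multiply two equal-size Matrix instances the straightforward way."""
    result = Matrix(a.size)
    for i in range(a.size):
        for j in range(a.size):
            result.mem[i][j] = sum(a.mem[i][k] * b.mem[k][j] for k in range(a.size))
    return result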
| 25.444444 | 97 | 0.640466 |
import copy
import math

class Matrix:
    def __init__(self, size, fill_with=0):
        assert (size >= 1), "Matrix size too small, must be a positive integer"
        self.size = math.floor(size)
        self.mem = [[fill_with] * self.size for _ in range(self.size)]
    def __getitem__(self, key):
        return copy.copy(self.mem[key])
    def __mul__(self, matrix):
        pass
| true | true |
1c49ba3af2eaa48f8e0bb0d3f22a566b9e7df7f7 | 1,212 | py | Python | tests/test_roi.py | EXLER/CILISSA | 452b9233d0d4a5139b9ab022b9178d9cde832359 | [
"MIT"
] | null | null | null | tests/test_roi.py | EXLER/CILISSA | 452b9233d0d4a5139b9ab022b9178d9cde832359 | [
"MIT"
] | 1 | 2021-10-15T19:55:56.000Z | 2021-10-15T19:55:56.000Z | tests/test_roi.py | EXLER/CILISSA | 452b9233d0d4a5139b9ab022b9178d9cde832359 | [
"MIT"
] | null | null | null | from pathlib import Path
from cilissa.images import Image, ImagePair
from cilissa.metrics import MSE
from cilissa.roi import ROI
from cilissa.transformations import Equalization
from tests.base import BaseTest
class TestROI(BaseTest):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_image = Image(Path(cls.data_path, "ref_images", "parrots.bmp"))
cls.transformed_image = Image(Path(cls.data_path, "other", "parrots_roi.bmp"))
cls.transformed_roi = ROI(0, 0, 384, 512)
cls.unchanged_roi = ROI(384, 0, 768, 512)
def test_roi_transformation(self) -> None:
result_image = self.base_image.copy()
cropped_image = result_image.crop(self.transformed_roi.slices)
transformed_image = Equalization().transform(cropped_image)
result_image.from_array(transformed_image.im, at=self.transformed_roi.slices)
self.assertEqual(result_image, self.transformed_image)
def test_roi_analysis(self) -> None:
pair = ImagePair(self.base_image, self.transformed_image)
pair.set_roi(self.unchanged_roi)
mse = MSE()
result = mse.analyze(pair)
self.assertEqual(result, 0)
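# A hedged usage sketch distilled from the tests above: score only a region of
# an image pair. The file paths are hypothetical placeholders.
#
#   base = Image(Path("data", "ref_images", "parrots.bmp"))
#   candidate = Image(Path("data", "other", "parrots_roi.bmp"))
#   pair = ImagePair(base, candidate)
#   pair.set_roi(ROI(384, 0, 768, 512))  # compare only the untouched half
#   score = MSE().analyze(pair)          # 0 when the region is identical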
| 32.756757 | 86 | 0.70462 |
from pathlib import Path
from cilissa.images import Image, ImagePair
from cilissa.metrics import MSE
from cilissa.roi import ROI
from cilissa.transformations import Equalization
from tests.base import BaseTest
class TestROI(BaseTest):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_image = Image(Path(cls.data_path, "ref_images", "parrots.bmp"))
cls.transformed_image = Image(Path(cls.data_path, "other", "parrots_roi.bmp"))
cls.transformed_roi = ROI(0, 0, 384, 512)
cls.unchanged_roi = ROI(384, 0, 768, 512)
def test_roi_transformation(self) -> None:
result_image = self.base_image.copy()
cropped_image = result_image.crop(self.transformed_roi.slices)
transformed_image = Equalization().transform(cropped_image)
result_image.from_array(transformed_image.im, at=self.transformed_roi.slices)
self.assertEqual(result_image, self.transformed_image)
def test_roi_analysis(self) -> None:
pair = ImagePair(self.base_image, self.transformed_image)
pair.set_roi(self.unchanged_roi)
mse = MSE()
result = mse.analyze(pair)
self.assertEqual(result, 0)
| true | true |
1c49bbbc3c51bc3b92d9cf824afce38dc820b85b | 16,100 | py | Python | tests/handlers/test_e2e_room_keys.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | 1 | 2019-05-01T11:05:51.000Z | 2019-05-01T11:05:51.000Z | tests/handlers/test_e2e_room_keys.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | null | null | null | tests/handlers/test_e2e_room_keys.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from twisted.internet import defer
import synapse.api.errors
import synapse.handlers.e2e_room_keys
import synapse.storage
from synapse.api import errors
from tests import unittest, utils
# sample room_key data for use in the tests
room_keys = {
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": False,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
class E2eRoomKeysHandlerTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs)
self.hs = None # type: synapse.server.HomeServer
        self.handler = None  # type: synapse.handlers.e2e_room_keys.E2eRoomKeysHandler
@defer.inlineCallbacks
def setUp(self):
self.hs = yield utils.setup_test_homeserver(
self.addCleanup,
handlers=None,
replication_layer=mock.Mock(),
)
self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs)
self.local_user = "@boris:" + self.hs.hostname
@defer.inlineCallbacks
def test_get_missing_current_version_info(self):
"""Check that we get a 404 if we ask for info about the current version
if there is no version.
"""
res = None
try:
yield self.handler.get_version_info(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_version_info(self):
"""Check that we get a 404 if we ask for info about a specific version
if it doesn't exist.
"""
res = None
try:
yield self.handler.get_version_info(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_create_version(self):
"""Check that we can create and then retrieve versions.
"""
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
# check we can retrieve it as a specific version
res = yield self.handler.get_version_info(self.local_user, "1")
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
# upload a new one...
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(res, "2")
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "2",
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
@defer.inlineCallbacks
def test_update_version(self):
"""Check that we can update versions.
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
self.assertDictEqual(res, {})
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
@defer.inlineCallbacks
def test_update_missing_version(self):
"""Check that we get a 404 on updating nonexistent versions
"""
res = None
try:
yield self.handler.update_version(self.local_user, "1", {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "1"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_update_bad_version(self):
"""Check that we get a 400 if the version in the body is missing or
doesn't match
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "incorrect"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
@defer.inlineCallbacks
def test_delete_missing_version(self):
"""Check that we get a 404 on deleting nonexistent versions
"""
res = None
try:
yield self.handler.delete_version(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_missing_current_version(self):
"""Check that we get a 404 on deleting nonexistent current version
"""
res = None
try:
yield self.handler.delete_version(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_version(self):
"""Check that we can create and then delete versions.
"""
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
# check we can delete it
yield self.handler.delete_version(self.local_user, "1")
# check that it's gone
res = None
try:
yield self.handler.get_version_info(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_backup(self):
"""Check that we get a 404 on querying missing backup
"""
res = None
try:
yield self.handler.get_room_keys(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_room_keys(self):
"""Check we get an empty response from an empty backup
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, {
"rooms": {}
})
# TODO: test the locking semantics when uploading room_keys,
# although this is probably best done in sytest
@defer.inlineCallbacks
def test_upload_room_keys_no_versions(self):
"""Check that we get a 404 on uploading keys when no versions are defined
"""
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_bogus_version(self):
"""Check that we get a 404 on uploading keys when an nonexistent version
is specified
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.upload_room_keys(
self.local_user, "bogus_version", room_keys
)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_wrong_version(self):
"""Check that we get a 403 on uploading keys for an old version
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(version, "2")
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "1", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 403)
@defer.inlineCallbacks
def test_upload_room_keys_insert(self):
"""Check that we can insert and retrieve keys for a session
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given room
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org"
)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given session_id
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, room_keys)
@defer.inlineCallbacks
def test_upload_room_keys_merge(self):
"""Check that we can upload a new room_key for an existing session and
have it correctly merged"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
new_room_keys = copy.deepcopy(room_keys)
new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']
# test that increasing the message_index doesn't replace the existing session
new_room_key['first_message_index'] = 2
new_room_key['session_data'] = 'new'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"SSBBTSBBIEZJU0gK"
)
# test that marking the session as verified however /does/ replace it
new_room_key['is_verified'] = True
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# test that a session with a higher forwarded_count doesn't replace one
# with a lower forwarding count
new_room_key['forwarded_count'] = 2
new_room_key['session_data'] = 'other'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# TODO: check edge cases as well as the common variations here
@defer.inlineCallbacks
def test_delete_room_keys(self):
"""Check that we can insert and delete keys for a session
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
# check for bulk-delete
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(self.local_user, version)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per room
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per session
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
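# A hedged distillation of the backup flow these tests exercise -- create a
# version, upload keys into it, read them back (coroutine plumbing and user id
# handling omitted; editor's illustration, not part of the original file):
#
#   version = yield handler.create_version(user_id, {
#       "algorithm": "m.megolm_backup.v1",
#       "auth_data": "...",
#   })
#   yield handler.upload_room_keys(user_id, version, room_keys)
#   backup = yield handler.get_room_keys(user_id, version)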
| 34.623656 | 89 | 0.605093 |
import copy
import mock
from twisted.internet import defer
import synapse.api.errors
import synapse.handlers.e2e_room_keys
import synapse.storage
from synapse.api import errors
from tests import unittest, utils
room_keys = {
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": False,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
class E2eRoomKeysHandlerTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs)
        self.hs = None
        self.handler = None
@defer.inlineCallbacks
def setUp(self):
self.hs = yield utils.setup_test_homeserver(
self.addCleanup,
handlers=None,
replication_layer=mock.Mock(),
)
self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs)
self.local_user = "@boris:" + self.hs.hostname
@defer.inlineCallbacks
def test_get_missing_current_version_info(self):
res = None
try:
yield self.handler.get_version_info(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_version_info(self):
res = None
try:
yield self.handler.get_version_info(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_create_version(self):
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
res = yield self.handler.get_version_info(self.local_user, "1")
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(res, "2")
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "2",
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
@defer.inlineCallbacks
def test_update_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
self.assertDictEqual(res, {})
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
@defer.inlineCallbacks
def test_update_missing_version(self):
res = None
try:
yield self.handler.update_version(self.local_user, "1", {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "1"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_update_bad_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "incorrect"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
@defer.inlineCallbacks
def test_delete_missing_version(self):
res = None
try:
yield self.handler.delete_version(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_missing_current_version(self):
res = None
try:
yield self.handler.delete_version(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_version(self):
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
yield self.handler.delete_version(self.local_user, "1")
res = None
try:
yield self.handler.get_version_info(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_backup(self):
res = None
try:
yield self.handler.get_room_keys(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_room_keys(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, {
"rooms": {}
})
# TODO: test the locking semantics when uploading room_keys,
# although this is probably best done in sytest
@defer.inlineCallbacks
def test_upload_room_keys_no_versions(self):
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_bogus_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.upload_room_keys(
self.local_user, "bogus_version", room_keys
)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_wrong_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(version, "2")
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "1", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 403)
@defer.inlineCallbacks
def test_upload_room_keys_insert(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given room
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org"
)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given session_id
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, room_keys)
@defer.inlineCallbacks
def test_upload_room_keys_merge(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
new_room_keys = copy.deepcopy(room_keys)
new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']
# test that increasing the message_index doesn't replace the existing session
new_room_key['first_message_index'] = 2
new_room_key['session_data'] = 'new'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"SSBBTSBBIEZJU0gK"
)
new_room_key['is_verified'] = True
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
        # test that a session with a higher forwarded_count doesn't replace one
        # with a lower forwarding count
new_room_key['forwarded_count'] = 2
new_room_key['session_data'] = 'other'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# TODO: check edge cases as well as the common variations here
@defer.inlineCallbacks
def test_delete_room_keys(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
# check for bulk-delete
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(self.local_user, version)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per room
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per session
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
| true | true |
1c49bbdc54235d9fd6a0bca037e561e4f50df361 | 18,581 | py | Python | store/models.py | olaoluwa-98/tetris | 2daf4f7dc24c655cadc71394aea2aa68879bf6ea | [
"MIT"
] | null | null | null | store/models.py | olaoluwa-98/tetris | 2daf4f7dc24c655cadc71394aea2aa68879bf6ea | [
"MIT"
] | null | null | null | store/models.py | olaoluwa-98/tetris | 2daf4f7dc24c655cadc71394aea2aa68879bf6ea | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from .addresses import STATES
from autoslug import AutoSlugField
from django.urls import reverse
from phonenumber_field.modelfields import PhoneNumberField
# this returns the location of the uploaded profile picture
def get_profile_pic_path(instance, filename):
    # `instance` here is the User itself, so read the username directly
    return 'profile_pictures/{}-{}'.format(instance.username, filename)
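# e.g. for a user "boris" uploading "me.png" this resolves to
# "profile_pictures/boris-me.png" (editor's illustration).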
class User(AbstractUser):
email = models.EmailField( verbose_name='email address', unique=True)
email_token = models.CharField(verbose_name='email token', max_length=16, editable=False, null=True)
is_verified = models.BooleanField(default=False)
phone = PhoneNumberField(default='',
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
profile_pic_path = models.ImageField(upload_to=get_profile_pic_path, max_length=255)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
# Override models save method:
def save(self, *args, **kwargs):
# generate email_token for the user
# email_token must be unique
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
while User.objects.filter(email_token=self.email_token).exists():
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
super(User, self).save(*args, **kwargs)
def __str__(self):
return '{}'.format(self.email)
class Brand(models.Model):
name = models.CharField(max_length=40, unique=True, verbose_name='name of the brand' )
email = models.EmailField( max_length=50, verbose_name='email address of the brand')
phone = PhoneNumberField(blank=True, null=True,
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
desc = models.CharField(max_length=255, verbose_name='description of brand', blank=True, null=True)
brand_image_url = models.ImageField(upload_to='img/brands/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date brand was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date brand details were updated last' )
def get_absolute_url(self):
return reverse('store:brand', kwargs={'slug': self.slug})
def get_carts(self):
return Cart.objects.filter(product__brand=self).order_by('-created_at')
def get_wishes(self):
return Wish.objects.filter(product__brand=self).order_by('-created_at')
def get_orders(self):
return OrderItem.objects.filter(product__brand=self).order_by('-created_at')
def random_product_images(self):
from django.db.models import Count
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
class ShippingAddress(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='shipping_addresses',
verbose_name ='Customer'
)
is_default = models.BooleanField(default=False)
zip_code = models.CharField( max_length=10, verbose_name='zip code' )
address = models.CharField( max_length=60, verbose_name='address' )
city = models.CharField( max_length=30, verbose_name='city' )
state = models.CharField( max_length=15, verbose_name='state', choices=STATES )
country = models.CharField( max_length=30, default='Nigeria', verbose_name='country' )
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date shipping address details were updated last' )
def __str__(self):
return '{}, {}. ({})'.format(self.city, self.state, self.user.username)
class Meta:
verbose_name_plural = 'Shipping Addresses'
get_latest_by = 'created_at'
ordering = ['user_id',]
class ProductCategory(models.Model):
name = models.CharField(max_length=30, unique=True, verbose_name='name of category')
CAT_TYPES = (
('top', 'Top'),
('bottom', 'Bottom'),
('accessory', 'Accessory'),
('foot', 'Footwear'),
('other', 'Other')
)
cat_type = models.CharField(max_length=10, choices=CAT_TYPES, verbose_name='type of category')
desc = models.CharField(max_length=255, verbose_name='description of product category', blank=True, null=True)
cat_image_url = models.ImageField(upload_to='img/product_categories/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date product category was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product category details were updated last')
def get_absolute_url(self):
return reverse('store:category', kwargs={'slug': self.slug})
def random_product_images(self):
from django.db.models import Count
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{} ({})'.format(self.name, self.cat_type)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'Product Categories'
ordering = ['name',]
class Size(models.Model):
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='sizes',
verbose_name ='Category'
)
    size_format = models.CharField(max_length=15, verbose_name='size format e.g. UK, US')
    value = models.CharField(max_length=10, verbose_name='size value e.g. 43 or XL')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date size was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date size details were updated last' )
def __str__(self):
return '{} - {} {}'.format(self.size_format, self.value, self.category.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['value']
class Product(models.Model):
admin = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
related_name='products',
verbose_name ='Staff',
blank=True,
null=True
)
brand = models.ForeignKey(
Brand,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Brand'
)
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Category'
)
GENDER = (
('male', 'Male'),
('female', 'Female'),
('unisex', 'Unisex')
)
COLOURS = (
('blue', 'Blue'),
('red', 'Red'),
('white', 'White'),
('black', 'Black'),
('green', 'Green'),
('purple', 'Purple'),
('yellow', 'Yellow'),
('gray', 'Gray'),
('khaki', 'Khaki'),
('brown', 'Brown'),
('orange', 'Orange'),
('navy blue', 'Navy Blue'),
('transparent', 'Transparent'),
('gold', 'Gold'),
('silver', 'Silver'),
)
SIZES = (
('EUR-39', 'EUR 39'),
)
name = models.CharField(max_length=50, verbose_name='name')
desc = models.CharField(max_length=255, verbose_name='description', blank=True, null=True)
gender = models.CharField(max_length=15, choices=GENDER, verbose_name='gender')
size = models.ForeignKey(
Size,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Product'
)
colour = models.CharField(max_length=15, verbose_name='colour', choices=COLOURS)
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17, verbose_name='price (₦)')
quantity = models.PositiveIntegerField(verbose_name='quantity left')
num_deliveries = models.PositiveIntegerField(verbose_name='deliveries', default=0)
orders_count = models.PositiveIntegerField(verbose_name='orders', default=0)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product details were updated last' )
def get_absolute_url(self):
return reverse('store:product', kwargs={'slug': self.slug})
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'admin_id', 'name']
class Wish(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='wishes',
verbose_name ='Owner'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='wishes',
verbose_name ='Product'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date wish was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date wish details were updated last' )
# Override models save method:
    def save(self, *args, **kwargs):
        # on create, skip duplicates: if this user already wishes for the
        # product, ignore the save. An existing wish being updated would
        # match itself in the filter, so only run the check for new rows.
        if not self.pk and Wish.objects.filter(user_id=self.user_id, product_id=self.product_id).exists():
            return
        super(Wish, self).save(*args, **kwargs)
def __str__(self):
return '{} -> {}'.format(self.user.username, self.product.name)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'wishes'
ordering = ['-created_at', 'user_id']
class Cart(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='cart',
verbose_name ='Owner',
blank=True,
null=True
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cart',
verbose_name ='product in the cart'
)
quantity = models.PositiveIntegerField(verbose_name='quantity of the product added')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date cart product was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date cart product details were updated last' )
# Override models save method:
def save(self, *args, **kwargs):
# check if cart product already exists, add more quantity to it
if not self.pk:
cart = Cart.objects.filter(user=self.user, product_id=self.product_id)
if cart.exists():
cart_item = cart.first()
cart_item.quantity += int(self.quantity)
super(Cart, cart_item).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
def __str__(self):
return 'x{} {} -> {}'.format(self.quantity, self.user, self.product.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
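# A hedged sketch of the merge behaviour implemented in Cart.save() above:
# adding the same product twice accumulates quantity rather than creating a
# second row (the user/product objects here are hypothetical):
#
#   Cart(user=user, product=shirt, quantity=1).save()
#   Cart(user=user, product=shirt, quantity=2).save()
#   assert user.cart.get(product=shirt).quantity == 3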
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='orders',
verbose_name ='Customer'
)
shipping_address = models.ForeignKey(
ShippingAddress,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='orders',
verbose_name ='shipping address',
)
ORDER_STATUS = (
('pending', 'Pending'),
('processing', 'Processing'),
('delivered', 'Delivered'),
('cancelled', 'Cancelled')
)
ref = models.CharField(verbose_name='reference',max_length=100,null=True,blank=True,
help_text='this field is generated automatically'
)
reason_cancelled = models.CharField(verbose_name='if order is cancelled, why?',max_length=100,blank=True,null=True)
canceller = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cancellers',
verbose_name ='the canceller',
)
status = models.CharField(choices=ORDER_STATUS, default='pending', max_length=100,
verbose_name='status'
)
deliver_date = models.DateTimeField(null=True, blank=True,
verbose_name='delivered (tetris)'
)
confirm_delivery_date = models.DateTimeField(null=True, blank=True,
verbose_name='confirmed delivered (customer)'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def subtotal(self):
from django.db.models import Sum, F
total = self.order_items.aggregate( subtotal=Sum(F('price_per_unit') * F('quantity'), output_field=models.DecimalField()))
if total['subtotal']:
return total['subtotal']
return 0
def get_absolute_url(self):
return reverse('store:order', kwargs={'ref': self.ref})
# Override models save method:
def save(self, *args, **kwargs):
if not self.pk:
# generate reference for the order
# order reference must be unique
self.ref = get_random_string(length=16)
while Order.objects.filter(ref=self.ref).exists():
self.ref = get_random_string(length=16)
super(Order, self).save(*args, **kwargs)
def __str__(self):
return '{} ordered {} [{}]'.format(self.user.username, self.ref, self.status)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
# permissions = (
# ('change_status_to_processing', 'Change Status to Processing'),
# ('change_status_to_pending', 'Change Status to Pending'),
# )
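# The ORM aggregate in Order.subtotal() above is equivalent to this
# pure-Python fold over the related items (hedged illustration; 'order' is a
# hypothetical instance):
#
#   subtotal = sum(
#       item.price_per_unit * item.quantity
#       for item in order.order_items.all()
#   )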
class OrderItem(models.Model):
order = models.ForeignKey(
Order,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='order_items',
verbose_name ='order'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='order_items',
verbose_name ='product ordered'
)
quantity = models.PositiveIntegerField(verbose_name='quantity ordered')
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def __str__(self):
return 'x{} {} [{}]'.format(self.quantity, self.product.name, self.order.ref)
class Meta:
verbose_name_plural = 'Order Items'
get_latest_by = 'created_at'
ordering = ['order',]
class ProductImage(models.Model):
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
related_name='product_images',
verbose_name ='product image belongs to'
)
product_image_url = models.ImageField(upload_to='img/products/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
return '{}\'s image'.format(self.product.name)
class Meta:
verbose_name_plural = 'Product Images'
get_latest_by = 'created_at'
ordering = ['product_id',]
class Feedback(models.Model):
email = models.EmailField( verbose_name='email address')
content = models.TextField(verbose_name='feedback')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='feedback was made'
)
def __str__(self):
return '{}\'s feedback'.format(self.email)
class TetrisImage(models.Model):
TYPES = (
('background-image', 'Background Image (1366px by 738px )'),
('default-product-img', 'Default Product Image (should be a perfect square)'),
('default-brand-img', 'Default Brand Image (should be a perfect square)'),
('default-category-img', 'Default Category Image (should be a perfect square)'),
)
name = models.CharField(max_length=30, verbose_name='name of the image', choices=TYPES)
description = models.TextField(verbose_name='description of the image', blank=True, null=True )
image_url = models.ImageField(upload_to='img/tetris-img/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
        return self.name
| 36.220273 | 130 | 0.645875 |
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from .addresses import STATES
from autoslug import AutoSlugField
from django.urls import reverse
from phonenumber_field.modelfields import PhoneNumberField
def get_profile_pic_path(instance, filename):
    return 'profile_pictures/{}-{}'.format(instance.username, filename)
class User(AbstractUser):
email = models.EmailField( verbose_name='email address', unique=True)
email_token = models.CharField(verbose_name='email token', max_length=16, editable=False, null=True)
is_verified = models.BooleanField(default=False)
phone = PhoneNumberField(default='',
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
profile_pic_path = models.ImageField(upload_to=get_profile_pic_path, max_length=255)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def save(self, *args, **kwargs):
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
while User.objects.filter(email_token=self.email_token).exists():
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
super(User, self).save(*args, **kwargs)
def __str__(self):
return '{}'.format(self.email)
class Brand(models.Model):
name = models.CharField(max_length=40, unique=True, verbose_name='name of the brand' )
email = models.EmailField( max_length=50, verbose_name='email address of the brand')
phone = PhoneNumberField(blank=True, null=True,
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
desc = models.CharField(max_length=255, verbose_name='description of brand', blank=True, null=True)
brand_image_url = models.ImageField(upload_to='img/brands/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date brand was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date brand details were updated last' )
def get_absolute_url(self):
return reverse('store:brand', kwargs={'slug': self.slug})
def get_carts(self):
return Cart.objects.filter(product__brand=self).order_by('-created_at')
def get_wishes(self):
return Wish.objects.filter(product__brand=self).order_by('-created_at')
def get_orders(self):
return OrderItem.objects.filter(product__brand=self).order_by('-created_at')
def random_product_images(self):
from django.db.models import Count
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
class ShippingAddress(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='shipping_addresses',
verbose_name ='Customer'
)
is_default = models.BooleanField(default=False)
zip_code = models.CharField( max_length=10, verbose_name='zip code' )
address = models.CharField( max_length=60, verbose_name='address' )
city = models.CharField( max_length=30, verbose_name='city' )
state = models.CharField( max_length=15, verbose_name='state', choices=STATES )
country = models.CharField( max_length=30, default='Nigeria', verbose_name='country' )
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date shipping address details were updated last' )
def __str__(self):
return '{}, {}. ({})'.format(self.city, self.state, self.user.username)
class Meta:
verbose_name_plural = 'Shipping Addresses'
get_latest_by = 'created_at'
ordering = ['user_id',]
class ProductCategory(models.Model):
name = models.CharField(max_length=30, unique=True, verbose_name='name of category')
CAT_TYPES = (
('top', 'Top'),
('bottom', 'Bottom'),
('accessory', 'Accessory'),
('foot', 'Footwear'),
('other', 'Other')
)
cat_type = models.CharField(max_length=10, choices=CAT_TYPES, verbose_name='type of category')
desc = models.CharField(max_length=255, verbose_name='description of product category', blank=True, null=True)
cat_image_url = models.ImageField(upload_to='img/product_categories/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date product category was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product category details were updated last')
def get_absolute_url(self):
return reverse('store:category', kwargs={'slug': self.slug})
def random_product_images(self):
from django.db.models import Count
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{} ({})'.format(self.name, self.cat_type)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'Product Categories'
ordering = ['name',]
class Size(models.Model):
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='sizes',
verbose_name ='Category'
)
    size_format = models.CharField(max_length=15, verbose_name='size format e.g. UK, US')
    value = models.CharField(max_length=10, verbose_name='size value e.g. 43 or XL')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date size was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date size details were updated last' )
def __str__(self):
return '{} - {} {}'.format(self.size_format, self.value, self.category.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['value']
class Product(models.Model):
admin = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
related_name='products',
verbose_name ='Staff',
blank=True,
null=True
)
brand = models.ForeignKey(
Brand,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Brand'
)
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Category'
)
GENDER = (
('male', 'Male'),
('female', 'Female'),
('unisex', 'Unisex')
)
COLOURS = (
('blue', 'Blue'),
('red', 'Red'),
('white', 'White'),
('black', 'Black'),
('green', 'Green'),
('purple', 'Purple'),
('yellow', 'Yellow'),
('gray', 'Gray'),
('khaki', 'Khaki'),
('brown', 'Brown'),
('orange', 'Orange'),
('navy blue', 'Navy Blue'),
('transparent', 'Transparent'),
('gold', 'Gold'),
('silver', 'Silver'),
)
SIZES = (
('EUR-39', 'EUR 39'),
)
name = models.CharField(max_length=50, verbose_name='name')
desc = models.CharField(max_length=255, verbose_name='description', blank=True, null=True)
gender = models.CharField(max_length=15, choices=GENDER, verbose_name='gender')
size = models.ForeignKey(
Size,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Product'
)
colour = models.CharField(max_length=15, verbose_name='colour', choices=COLOURS)
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17, verbose_name='price (₦)')
quantity = models.PositiveIntegerField(verbose_name='quantity left')
num_deliveries = models.PositiveIntegerField(verbose_name='deliveries', default=0)
orders_count = models.PositiveIntegerField(verbose_name='orders', default=0)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product details were updated last' )
def get_absolute_url(self):
return reverse('store:product', kwargs={'slug': self.slug})
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'admin_id', 'name']
class Wish(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='wishes',
verbose_name ='Owner'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='wishes',
verbose_name ='Product'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date wish was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date wish details were updated last' )
    def save(self, *args, **kwargs):
        if not self.pk and Wish.objects.filter(user_id=self.user_id, product_id=self.product_id).exists():
            return
        super(Wish, self).save(*args, **kwargs)
def __str__(self):
return '{} -> {}'.format(self.user.username, self.product.name)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'wishes'
ordering = ['-created_at', 'user_id']
class Cart(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='cart',
verbose_name ='Owner',
blank=True,
null=True
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cart',
verbose_name ='product in the cart'
)
quantity = models.PositiveIntegerField(verbose_name='quantity of the product added')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date cart product was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date cart product details were updated last' )
def save(self, *args, **kwargs):
if not self.pk:
cart = Cart.objects.filter(user=self.user, product_id=self.product_id)
if cart.exists():
cart_item = cart.first()
cart_item.quantity += int(self.quantity)
super(Cart, cart_item).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
def __str__(self):
return 'x{} {} -> {}'.format(self.quantity, self.user, self.product.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='orders',
verbose_name='Customer'
)
shipping_address = models.ForeignKey(
ShippingAddress,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='orders',
verbose_name='shipping address',
)
ORDER_STATUS = (
('pending', 'Pending'),
('processing', 'Processing'),
('delivered', 'Delivered'),
('cancelled', 'Cancelled')
)
ref = models.CharField(verbose_name='reference', max_length=100, null=True, blank=True,
help_text='this field is generated automatically'
)
reason_cancelled = models.CharField(verbose_name='if order is cancelled, why?', max_length=100, blank=True, null=True)
canceller = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cancellers',
verbose_name='the canceller',
)
status = models.CharField(choices=ORDER_STATUS, default='pending', max_length=100,
verbose_name='status'
)
deliver_date = models.DateTimeField(null=True, blank=True,
verbose_name='delivered (tetris)'
)
confirm_delivery_date = models.DateTimeField(null=True, blank=True,
verbose_name='confirmed delivered (customer)'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField(auto_now=True, verbose_name='date order details were updated last')
def subtotal(self):
from django.db.models import Sum, F
total = self.order_items.aggregate(subtotal=Sum(F('price_per_unit') * F('quantity'), output_field=models.DecimalField()))
if total['subtotal']:
return total['subtotal']
return 0
def get_absolute_url(self):
return reverse('store:order', kwargs={'ref': self.ref})
def save(self, *args, **kwargs):
if not self.pk:
self.ref = get_random_string(length=16)
while Order.objects.filter(ref=self.ref).exists():
self.ref = get_random_string(length=16)
super(Order, self).save(*args, **kwargs)
def __str__(self):
return '{} ordered {} [{}]'.format(self.user.username, self.ref, self.status)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
class OrderItem(models.Model):
order = models.ForeignKey(
Order,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='order_items',
verbose_name='order'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='order_items',
verbose_name='product ordered'
)
quantity = models.PositiveIntegerField(verbose_name='quantity ordered')
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField(auto_now=True, verbose_name='date order details were updated last')
def __str__(self):
return 'x{} {} [{}]'.format(self.quantity, self.product.name, self.order.ref)
class Meta:
verbose_name_plural = 'Order Items'
get_latest_by = 'created_at'
ordering = ['order',]
class ProductImage(models.Model):
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
related_name='product_images',
verbose_name='product image belongs to'
)
product_image_url = models.ImageField(upload_to='img/products/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField(auto_now=True, verbose_name='date image was updated last')
def __str__(self):
return '{}\'s image'.format(self.product.name)
class Meta:
verbose_name_plural = 'Product Images'
get_latest_by = 'created_at'
ordering = ['product_id',]
class Feedback(models.Model):
email = models.EmailField( verbose_name='email address')
content = models.TextField(verbose_name='feedback')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='feedback was made'
)
def __str__(self):
return '{}\'s feedback'.format(self.email)
class TetrisImage(models.Model):
TYPES = (
('background-image', 'Background Image (1366px by 738px )'),
('default-product-img', 'Default Product Image (should be a perfect square)'),
('default-brand-img', 'Default Brand Image (should be a perfect square)'),
('default-category-img', 'Default Category Image (should be a perfect square)'),
)
name = models.CharField(max_length=30, verbose_name='name of the image', choices=TYPES)
description = models.TextField(verbose_name='description of the image', blank=True, null=True)
image_url = models.ImageField(upload_to='img/tetris-img/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField(auto_now=True, verbose_name='date image was updated last')
def __str__(self):
return self.name | true | true |
1c49bcf86193b8dd320433d8f581a02d71c51fc7 | 927 | py | Python | PyPoll/main.py | amitpatel02-atl/Python-Challenge | e864b87b40c64fa7f5b6a3ad98da2f31c8028790 | [
"ADSL"
] | null | null | null | PyPoll/main.py | amitpatel02-atl/Python-Challenge | e864b87b40c64fa7f5b6a3ad98da2f31c8028790 | [
"ADSL"
] | null | null | null | PyPoll/main.py | amitpatel02-atl/Python-Challenge | e864b87b40c64fa7f5b6a3ad98da2f31c8028790 | [
"ADSL"
] | null | null | null | # First we'll import the os module
# This will allow us to create file paths across operating systems
import os
# Module for reading CSV files
import csv
# Set path for file
csvpath = os.path.join("election_data.csv")
# Open the csv
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
# Read the header row first (skip this step if there is no header)
csv_header = next(csvreader)
print(f"CSV Header: {csv_header}")
# Create a variable and set it as a list
list_of_candidates = ["Khan", "Correy", "Li", "O'Tooley"]
total_votes = 0
# Loop through looking for the votes, then add to the counter
vote_counter = 0
for row in csvreader:
# Count number of votes
vote_counter = vote_counter + 1
print("Election Results")
print("------------------")
print(f"Total Votes : {vote_counter}")
print("Khan")
print("Correy")
print("Li")
print("O'Tooley")
| 24.394737 | 70 | 0.674218 | # This will allow us to create file paths across operating systems
import os
# Module for reading CSV files
import csv
# Set path for file
csvpath = os.path.join("election_data.csv")
# Open the csv
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
# Read the header row first (skip this step if there is no header)
csv_header = next(csvreader)
print(f"CSV Header: {csv_header}")
# Create a variable and set it as a list
list_of_candidates = ["Khan", "Correy", "Li", "O'Tooley"]
total_votes = 0
vote_counter = 0
for row in csvreader:
vote_counter = vote_counter + 1
print("Election Results")
print("------------------")
print(f"Total Votes : {vote_counter}")
print("Khan")
print("Correy")
print("Li")
print("O'Tooley")
| true | true |
1c49bd27ad29632093673b01d9d464f573e77386 | 5,118 | py | Python | discord/ui/modal.py | DeadPool3333/enhanced-discord.py | be34c7e521c9edbe6d8ff949962c0ab777821712 | [
"MIT"
] | 4 | 2021-09-28T12:45:00.000Z | 2022-02-04T20:11:58.000Z | discord/ui/modal.py | DeadPool3333/enhanced-discord.py | be34c7e521c9edbe6d8ff949962c0ab777821712 | [
"MIT"
] | 2 | 2021-11-07T12:31:33.000Z | 2022-01-06T17:06:51.000Z | discord/ui/modal.py | DeadPool3333/enhanced-discord.py | be34c7e521c9edbe6d8ff949962c0ab777821712 | [
"MIT"
] | 8 | 2022-01-10T22:26:03.000Z | 2022-02-25T14:26:04.000Z | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import os
import asyncio
import sys
import traceback
from .item import Item
from itertools import groupby
from .view import _ViewWeights as _ModalWeights
from ..interactions import Interaction
if TYPE_CHECKING:
from ..state import ConnectionState
__all__ = ("Modal",)
class Modal:
"""Represents a UI Modal.
This object must be inherited to create a UI within Discord.
.. versionadded:: 2.0
Parameters
------------
title: :class:`str`
The title of the modal.
custom_id: Optional[:class:`str`]
The ID of the modal that gets received during an interaction.
Attributes
------------
title: :class:`str`
The title of the modal.
custom_id: Optional[:class:`str`]
The ID of the modal that gets received during an interaction.
children: List[:class:`Item`]
The children of the modal.
"""
def __init__(self, title: str, custom_id: Optional[str] = None) -> None:
self.title = title
self.custom_id = custom_id or os.urandom(16).hex()
self.children: List[Item] = []
self.__weights = _ModalWeights(self.children)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.title=} {self.custom_id=}>"
def add_item(self, item: Item):
if not isinstance(item, Item):
raise TypeError(f"expected Item not {item.__class__!r}")
if len(self.children) >= 5:
raise ValueError("Modal can only have a maximum of 5 items")
self.__weights.add_item(item)
self.children.append(item)
def remove_item(self, item: Item):
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
"type": 1,
"components": children,
}
)
return components
async def callback(self, interaction: Interaction):
"""|coro|
The callback associated with this Modal.
This can be overridden by subclasses.
Parameters
-----------
interaction: :class:`.Interaction`
The interaction that submitted this Modal.
"""
pass
async def on_error(self, error: Exception, interaction: Interaction):
"""|coro|
The callback for when an error occurs in the :meth:`callback`.
The default implementation prints the traceback to stderr.
Parameters
-----------
error: :class:`Exception`
The error that occurred.
interaction: :class:`.Interaction`
The interaction that submitted this Modal.
"""
print(f"Ignoring exception in modal {self}:", file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
def to_dict(self) -> Dict[str, Any]:
return {
"title": self.title,
"custom_id": self.custom_id,
"components": self.to_components(),
}
class ModalStore:
def __init__(self, state: ConnectionState) -> None:
# (user_id, custom_id) : Modal
self._modals: Dict[Tuple[int, str], Modal] = {}
self._state = state
def add_modal(self, modal: Modal, user_id: int):
self._modals[(user_id, modal.custom_id)] = modal
def remove_modal(self, modal: Modal, user_id: int):
self._modals.pop((user_id, modal.custom_id))
async def _scheduled_task(self, modal: Modal, interaction: Interaction):
try:
await modal.callback(interaction)
except Exception as e:
await modal.on_error(e, interaction)
def dispatch(self, user_id: int, custom_id: str, interaction: Interaction):
key = (user_id, custom_id)
modal = self._modals.get(key)
if modal is None:
return
assert interaction.data is not None
components = [
component for action_row in interaction.data["components"] for component in action_row["components"]
]
for component in components:
component_custom_id = component["custom_id"]
for child in modal.children:
if child.custom_id == component_custom_id: # type: ignore
child.refresh_state(component)
break
asyncio.create_task(
self._scheduled_task(modal, interaction), name=f"discord-ui-modal-dispatch-{modal.custom_id}"
)
self.remove_modal(modal, user_id)
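# Hedged usage sketch (not part of the original module): a minimal Modal
# subclass overriding the callback coroutine. The `send_message` call on the
# interaction response is an assumption based on discord.py-style APIs.
class ExampleModal(Modal):
    def __init__(self) -> None:
        super().__init__(title="Example")

    async def callback(self, interaction: Interaction) -> None:
        # ModalStore.dispatch routes the submitted interaction here.
        await interaction.response.send_message("Modal submitted!")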
| 29.413793 | 112 | 0.603751 | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import os
import asyncio
import sys
import traceback
from .item import Item
from itertools import groupby
from .view import _ViewWeights as _ModalWeights
from ..interactions import Interaction
if TYPE_CHECKING:
from ..state import ConnectionState
__all__ = ("Modal",)
class Modal:
def __init__(self, title: str, custom_id: Optional[str] = None) -> None:
self.title = title
self.custom_id = custom_id or os.urandom(16).hex()
self.children: List[Item] = []
self.__weights = _ModalWeights(self.children)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.title=} {self.custom_id=}>"
def add_item(self, item: Item):
if not isinstance(item, Item):
raise TypeError(f"expected Item not {item.__class__!r}")
if len(self.children) >= 5:
raise ValueError("Modal can only have a maximum of 5 items")
self.__weights.add_item(item)
self.children.append(item)
def remove_item(self, item: Item):
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
"type": 1,
"components": children,
}
)
return components
async def callback(self, interaction: Interaction):
pass
async def on_error(self, error: Exception, interaction: Interaction):
print(f"Ignoring exception in modal {self}:", file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
def to_dict(self) -> Dict[str, Any]:
return {
"title": self.title,
"custom_id": self.custom_id,
"components": self.to_components(),
}
class ModalStore:
def __init__(self, state: ConnectionState) -> None:
self._modals: Dict[Tuple[int, str], Modal] = {}
self._state = state
def add_modal(self, modal: Modal, user_id: int):
self._modals[(user_id, modal.custom_id)] = modal
def remove_modal(self, modal: Modal, user_id: int):
self._modals.pop((user_id, modal.custom_id))
async def _scheduled_task(self, modal: Modal, interaction: Interaction):
try:
await modal.callback(interaction)
except Exception as e:
await modal.on_error(e, interaction)
def dispatch(self, user_id: int, custom_id: str, interaction: Interaction):
key = (user_id, custom_id)
modal = self._modals.get(key)
if modal is None:
return
assert interaction.data is not None
components = [
component for action_row in interaction.data["components"] for component in action_row["components"]
]
for component in components:
component_custom_id = component["custom_id"]
for child in modal.children:
if child.custom_id == component_custom_id:
child.refresh_state(component)
break
asyncio.create_task(
self._scheduled_task(modal, interaction), name=f"discord-ui-modal-dispatch-{modal.custom_id}"
)
self.remove_modal(modal, user_id)
| true | true |
1c49bde9cb2690f493422ebf0eb42afb5f96147d | 654 | py | Python | src/directional_clustering/clustering/kmeans/__init__.py | arpastrana/directional_clustering | 78fd39fe4ad207b2a639deddf4ba12d5580df5c6 | [
"MIT"
] | 6 | 2020-08-04T15:24:22.000Z | 2022-02-02T21:34:33.000Z | src/directional_clustering/clustering/kmeans/__init__.py | arpastrana/directional_clustering | 78fd39fe4ad207b2a639deddf4ba12d5580df5c6 | [
"MIT"
] | 3 | 2020-10-30T02:33:08.000Z | 2020-11-04T19:45:08.000Z | src/directional_clustering/clustering/kmeans/__init__.py | arpastrana/directional_clustering | 78fd39fe4ad207b2a639deddf4ba12d5580df5c6 | [
"MIT"
] | null | null | null | """
directional_clustering.clustering.kmeans
****************************************
.. currentmodule:: directional_clustering.clustering.kmeans
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
Functions
=========
.. autosummary::
:toctree: generated/
:nosignatures:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from .<module> import *
from .operations import *
from .distances import *
from ._kmeans import *
from .cosine import *
from .variational import *
from .euclidean import *
__all__ = [name for name in dir() if not name.startswith('_')]
| 16.769231 | 62 | 0.689602 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .operations import *
from .distances import *
from ._kmeans import *
from .cosine import *
from .variational import *
from .euclidean import *
__all__ = [name for name in dir() if not name.startswith('_')]
| true | true |
1c49be5ca86913a07ac3f5f1c7e0e1e3d9ac11be | 408 | py | Python | integration_tests/mass_simulator_ITG_test.py | hoondental/smtm | f7648da652c5437ee27efef6fbf2480045130c16 | [
"MIT"
] | 16 | 2020-02-21T08:18:04.000Z | 2022-03-29T06:34:29.000Z | integration_tests/mass_simulator_ITG_test.py | hoondental/smtm | f7648da652c5437ee27efef6fbf2480045130c16 | [
"MIT"
] | 31 | 2019-11-11T13:06:47.000Z | 2022-02-26T12:14:41.000Z | integration_tests/mass_simulator_ITG_test.py | msaltnet/smtm | b2b480a59204e7d730d60ec037b00660d9dd235d | [
"MIT"
] | 12 | 2020-07-03T06:44:22.000Z | 2022-03-30T03:03:05.000Z | import time
import unittest
from smtm import MassSimulator
from unittest.mock import *
class MassSimulatorIntegrationTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("builtins.print")
def test_ITG_run_single_simulation(self, mock_print):
mass = MassSimulator()
mass.run("integration_tests/data/mass_simulation_config.json")
| 21.473684 | 70 | 0.720588 | import time
import unittest
from smtm import MassSimulator
from unittest.mock import *
class MassSimulatorIntegrationTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("builtins.print")
def test_ITG_run_single_simulation(self, mock_print):
mass = MassSimulator()
mass.run("integration_tests/data/mass_simulation_config.json")
| true | true |
1c49bf29b541c47c8d66c29b20e2bf30bc1b27d6 | 765 | py | Python | tests/journal.ext/info_example.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/journal.ext/info_example.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/journal.ext/info_example.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# (c) 1998-2022 all rights reserved
def test():
"""
Exercise the info channel with a realistic example
"""
# get the trash can
from journal.ext.journal import Trash as trash
# and the channel
from journal.ext.journal import Informational as info
# make an info channel
channel = info(name="tests.journal.info")
# send the output to trash
channel.device = trash()
# add some metadata
channel.notes["time"] = "now"
# inject
channel.line("info channel:")
channel.log(" hello world!")
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| 19.615385 | 57 | 0.627451 |
def test():
from journal.ext.journal import Trash as trash
from journal.ext.journal import Informational as info
channel = info(name="tests.journal.info")
channel.device = trash()
channel.notes["time"] = "now"
channel.line("info channel:")
channel.log(" hello world!")
return
if __name__ == "__main__":
test()
| true | true |
1c49bfa2d1946ee3be217dd485422832d454b222 | 292 | py | Python | xicam/spectral/operations/clustering.py | Xi-CAM/Xi-cam.spectral | 62240c4992ba79a2f97db99ade988a4613566e98 | [
"BSD-3-Clause"
] | null | null | null | xicam/spectral/operations/clustering.py | Xi-CAM/Xi-cam.spectral | 62240c4992ba79a2f97db99ade988a4613566e98 | [
"BSD-3-Clause"
] | 10 | 2020-09-15T03:16:26.000Z | 2021-02-06T08:17:47.000Z | xicam/spectral/operations/clustering.py | Xi-CAM/Xi-cam.spectral | 62240c4992ba79a2f97db99ade988a4613566e98 | [
"BSD-3-Clause"
] | 1 | 2020-10-20T17:06:43.000Z | 2020-10-20T17:06:43.000Z | # k-means
## scikit-learn.cluster.kmeans
# hierarchical clustering analysis "EMSC" (Extended Multiplicative Scattering Correction)
## https://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering
## https://github.com/RPCausin/EMSC/blob/master/EMSC.py: (Bassan, Konevskikh)
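# Hedged sketch (not part of the original stub): one way to get k-means labels
# for a spectral map with scikit-learn, assuming `data` is an
# (n_pixels, n_wavenumbers) float array; n_clusters and random_state are
# illustrative choices, not project defaults.
def kmeans_labels(data, n_clusters=4):
    # Local import keeps this stub free of import-time dependencies.
    from sklearn.cluster import KMeans
    model = KMeans(n_clusters=n_clusters, random_state=0)
    return model.fit_predict(data)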
| 48.666667 | 89 | 0.794521 | true | true |
|
1c49c0b805416ccceb49291147df00302441da20 | 10,379 | py | Python | test/mitmproxy/test_optmanager.py | dotnes/mitmproxy | 5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f | [
"MIT"
] | 1 | 2017-12-27T09:05:23.000Z | 2017-12-27T09:05:23.000Z | test/mitmproxy/test_optmanager.py | dotnes/mitmproxy | 5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f | [
"MIT"
] | null | null | null | test/mitmproxy/test_optmanager.py | dotnes/mitmproxy | 5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f | [
"MIT"
] | 2 | 2018-09-03T19:26:31.000Z | 2019-04-08T23:05:15.000Z | import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
# pytest.raises keeps a reference here that interferes with the cleanup test
# further down.
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'listen_port'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
# Nonexistent options ignore
opts.make_parser(parser, "nonexistentxxx")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
| 24.137209 | 81 | 0.598902 | import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'listen_port'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
# Nonexistent options ignore
opts.make_parser(parser, "nonexistentxxx")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
| true | true |
1c49c25e5e36e3834b02920900d27d8725a5e1cf | 1,182 | py | Python | pets/forms/pets.py | IvanParvanovski/petstagram-repository | 03f1464d4f5919712446f812fad044056f9a15f6 | [
"MIT"
] | 2 | 2021-06-10T08:18:00.000Z | 2021-06-12T19:10:49.000Z | pets/forms/pets.py | IvanParvanovski/petstagram-repository | 03f1464d4f5919712446f812fad044056f9a15f6 | [
"MIT"
] | null | null | null | pets/forms/pets.py | IvanParvanovski/petstagram-repository | 03f1464d4f5919712446f812fad044056f9a15f6 | [
"MIT"
] | null | null | null | from django import forms
from pets.models.pet import Pet
class PetCreateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for (_, field) in self.fields.items():
field.widget.attrs['class'] = 'form-control'
class Meta:
model = Pet
exclude = ('user', )
# widgets = {
# 'type': forms.Select(
# attrs={
# 'class': 'form-control',
# },
# ),
#
# 'name': forms.TextInput(
# attrs={
# 'class': 'form-control',
# },
# ),
#
# 'age': forms.NumberInput(
# attrs={
# 'class': 'form-control'
# },
# ),
#
# 'image_url': forms.TextInput(
# attrs={
# 'class': 'form-control'
# },
# ),
#
# 'description': forms.Textarea(
# attrs={
# 'class': 'form-control'
# },
# ),
# }
| 25.148936 | 56 | 0.358714 | from django import forms
from pets.models.pet import Pet
class PetCreateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for (_, field) in self.fields.items():
field.widget.attrs['class'] = 'form-control'
class Meta:
model = Pet
exclude = ('user', )
| true | true |
1c49c279cce146a4c48b5adc7ff7367616606ace | 2,101 | py | Python | wagtail_commons/core/management/commands/bootstrap_users.py | bgrace/wagtail-commons | 37985629e3098842c08f6ae7072c2af8a69319f0 | [
"BSD-3-Clause"
] | 13 | 2015-03-13T06:44:47.000Z | 2021-08-01T02:36:36.000Z | wagtail_commons/core/management/commands/bootstrap_users.py | bgrace/wagtail-commons | 37985629e3098842c08f6ae7072c2af8a69319f0 | [
"BSD-3-Clause"
] | null | null | null | wagtail_commons/core/management/commands/bootstrap_users.py | bgrace/wagtail-commons | 37985629e3098842c08f6ae7072c2af8a69319f0 | [
"BSD-3-Clause"
] | 1 | 2016-02-07T20:54:40.000Z | 2016-02-07T20:54:40.000Z | from django.conf import settings
__author__ = '[email protected]'
import codecs
import os
from optparse import make_option
import yaml
import yaml.parser
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = '<content directory>'
help = 'Create users, found in <content directory>/users.yml'
option_list = BaseCommand.option_list + (
make_option('--content', dest='content_path', type='string', ),
)
def handle(self, *args, **options):
if options['content_path']:
path = options['content_path']
elif settings.BOOTSTRAP_CONTENT_DIR:
path = settings.BOOTSTRAP_CONTENT_DIR
else:
raise CommandError("Pass --content <content dir>, where <content dir>/pages contain .yml files")
if not os.path.isdir(path):
raise CommandError("Content dir '{0}' does not exist or is not a directory".format(path))
content_path = os.path.join(path, 'users.yml')
if not os.path.isfile(content_path):
raise CommandError("Could not find file '{0}'".format(content_path))
f = codecs.open(content_path, encoding='utf-8')
stream = yaml.load_all(f)
users = next(stream)
f.close()
for user in users:
try:
u = User.objects.create(username=user['username'],
email=user['email'],
first_name=user['first_name'],
last_name=user['last_name'],
is_superuser=user['is_superuser'],
is_staff=user['is_staff'])
u.set_password(user['password'])
u.save()
self.stdout.write("Created {0}".format(user['username']))
except IntegrityError:
self.stderr.write("Could not create {0}, already exists?".format(user['username'])) | 36.859649 | 108 | 0.585436 | from django.conf import settings
__author__ = '[email protected]'
import codecs
import os
from optparse import make_option
import yaml
import yaml.parser
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = '<content directory>'
help = 'Create users, found in <content directory>/users.yml'
option_list = BaseCommand.option_list + (
make_option('--content', dest='content_path', type='string', ),
)
def handle(self, *args, **options):
if options['content_path']:
path = options['content_path']
elif settings.BOOTSTRAP_CONTENT_DIR:
path = settings.BOOTSTRAP_CONTENT_DIR
else:
raise CommandError("Pass --content <content dir>, where <content dir>/pages contain .yml files")
if not os.path.isdir(path):
raise CommandError("Content dir '{0}' does not exist or is not a directory".format(path))
content_path = os.path.join(path, 'users.yml')
if not os.path.isfile(content_path):
raise CommandError("Could not find file '{0}'".format(content_path))
f = codecs.open(content_path, encoding='utf-8')
stream = yaml.load_all(f)
users = next(stream)
f.close()
for user in users:
try:
u = User.objects.create(username=user['username'],
email=user['email'],
first_name=user['first_name'],
last_name=user['last_name'],
is_superuser=user['is_superuser'],
is_staff=user['is_staff'])
u.set_password(user['password'])
u.save()
self.stdout.write("Created {0}".format(user['username']))
except IntegrityError:
self.stderr.write("Could not create {0}, already exists?".format(user['username'])) | true | true |
1c49c4ae101e4f898905a971de616b3252a2ebed | 4,978 | py | Python | oauth2.py | mats-ch/ctfd-oauth | 59d9c6bd22c69f12d909329cc94293270fb09ba6 | [
"MIT"
] | null | null | null | oauth2.py | mats-ch/ctfd-oauth | 59d9c6bd22c69f12d909329cc94293270fb09ba6 | [
"MIT"
] | null | null | null | oauth2.py | mats-ch/ctfd-oauth | 59d9c6bd22c69f12d909329cc94293270fb09ba6 | [
"MIT"
] | null | null | null | from flask import render_template, session, redirect
from flask_dance.contrib import azure, github
import flask_dance.contrib
import os
from CTFd.auth import confirm, register, reset_password, login
from CTFd.models import db, Users
from CTFd.utils import set_config
from CTFd.utils.logging import log
from CTFd.utils.security.auth import login_user, logout_user
from CTFd import utils
import boto3
import base64
from botocore.exceptions import ClientError
import json
def get_secret():
secret_name = "ctf_azure_sso"
region_name = "eu-west-1"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(decoded_binary_secret)
def load(app):
########################
# Plugin Configuration #
########################
aws_secret = get_secret()
authentication_url_prefix = "/auth"
oauth_client_id = aws_secret['OAUTHLOGIN_CLIENT_ID']
oauth_client_secret = aws_secret['OAUTHLOGIN_CLIENT_SECRET']
oauth_provider = "azure"
create_missing_user = True
##################
# User Functions #
##################
def retrieve_user_from_database(username):
user = Users.query.filter_by(email=username).first()
if user is not None:
log('logins', "[{date}] {ip} - " + user.name + " - OAuth2 bridged user found")
return user
def create_user(username, displayName):
with app.app_context():
user = Users(email=username, name=displayName.strip())
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found, creating user")
db.session.add(user)
db.session.commit()
db.session.flush()
login_user(user)
return user
def create_or_get_user(username, displayName):
user = retrieve_user_from_database(username)
if user is not None:
login_user(user)
return user
if create_missing_user:
return create_user(username, displayName)
else:
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found and not configured to create missing users")
return None
##########################
# Provider Configuration #
##########################
provider_blueprints = {
'azure': lambda: flask_dance.contrib.azure.make_azure_blueprint(
login_url='/azure',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/azure/confirm"),
'github': lambda: flask_dance.contrib.github.make_github_blueprint(
login_url='/github',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/github/confirm")
}
def get_azure_user():
user_info = flask_dance.contrib.azure.azure.get("/v1.0/me").json()
return create_or_get_user(
username=user_info["userPrincipalName"],
displayName=user_info["displayName"])
def get_github_user():
user_info = flask_dance.contrib.github.github.get("/user").json()
return create_or_get_user(
username=user_info["email"],
displayName=user_info["name"])
provider_users = {
'azure': lambda: get_azure_user(),
'github': lambda: get_github_user()
}
provider_blueprint = provider_blueprints[oauth_provider]() # Resolved lambda
#######################
# Blueprint Functions #
#######################
@provider_blueprint.route('/<string:auth_provider>/confirm', methods=['GET'])
def confirm_auth_provider(auth_provider):
if auth_provider not in provider_users:
return redirect('/')
provider_user = provider_users[auth_provider]() # Resolved lambda
session.regenerate()
return redirect('/')
app.register_blueprint(provider_blueprint, url_prefix=authentication_url_prefix)
print(app.register_blueprint)
###############################
# Application Reconfiguration #
###############################
# ('', 204) is "No Content" code
set_config('registration_visibility', False)
app.view_functions['auth.login'] = lambda: redirect(authentication_url_prefix + "/" + oauth_provider)
app.view_functions['auth.register'] = lambda: ('', 204)
app.view_functions['auth.reset_password'] = lambda: ('', 204)
app.view_functions['auth.confirm'] = lambda: ('', 204)
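# Hedged example (not part of the original module): get_secret() is expected
# to return a dict carrying the two keys read in load(); the values below are
# placeholders, not real credentials.
#
# {
#     "OAUTHLOGIN_CLIENT_ID": "<azure-application-client-id>",
#     "OAUTHLOGIN_CLIENT_SECRET": "<azure-application-client-secret>"
# }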
| 35.557143 | 136 | 0.631579 | from flask import render_template, session, redirect
from flask_dance.contrib import azure, github
import flask_dance.contrib
import os
from CTFd.auth import confirm, register, reset_password, login
from CTFd.models import db, Users
from CTFd.utils import set_config
from CTFd.utils.logging import log
from CTFd.utils.security.auth import login_user, logout_user
from CTFd import utils
import boto3
import base64
from botocore.exceptions import ClientError
import json
def get_secret():
secret_name = "ctf_azure_sso"
region_name = "eu-west-1"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(decoded_binary_secret)
def load(app):
aws_secret = get_secret()
authentication_url_prefix = "/auth"
oauth_client_id = aws_secret['OAUTHLOGIN_CLIENT_ID']
oauth_client_secret = aws_secret['OAUTHLOGIN_CLIENT_SECRET']
oauth_provider = "azure"
create_missing_user = True
def retrieve_user_from_database(username):
user = Users.query.filter_by(email=username).first()
if user is not None:
log('logins', "[{date}] {ip} - " + user.name + " - OAuth2 bridged user found")
return user
def create_user(username, displayName):
with app.app_context():
user = Users(email=username, name=displayName.strip())
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found, creating user")
db.session.add(user)
db.session.commit()
db.session.flush()
login_user(user)
return user
def create_or_get_user(username, displayName):
user = retrieve_user_from_database(username)
if user is not None:
login_user(user)
return user
if create_missing_user:
return create_user(username, displayName)
else:
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found and not configured to create missing users")
return None
provider_blueprints = {
'azure': lambda: flask_dance.contrib.azure.make_azure_blueprint(
login_url='/azure',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/azure/confirm"),
'github': lambda: flask_dance.contrib.github.make_github_blueprint(
login_url='/github',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/github/confirm")
}
def get_azure_user():
user_info = flask_dance.contrib.azure.azure.get("/v1.0/me").json()
return create_or_get_user(
username=user_info["userPrincipalName"],
displayName=user_info["displayName"])
def get_github_user():
user_info = flask_dance.contrib.github.github.get("/user").json()
return create_or_get_user(
username=user_info["email"],
displayName=user_info["name"])
provider_users = {
'azure': lambda: get_azure_user(),
'github': lambda: get_github_user()
}
provider_blueprint = provider_blueprints[oauth_provider]()
@provider_blueprint.route('/<string:auth_provider>/confirm', methods=['GET'])
def confirm_auth_provider(auth_provider):
if auth_provider not in provider_users:
return redirect('/')
provider_user = provider_users[auth_provider]()
session.regenerate()
return redirect('/')
app.register_blueprint(provider_blueprint, url_prefix=authentication_url_prefix)
print(app.register_blueprint)
set_config('registration_visibility', False)
app.view_functions['auth.login'] = lambda: redirect(authentication_url_prefix + "/" + oauth_provider)
app.view_functions['auth.register'] = lambda: ('', 204)
app.view_functions['auth.reset_password'] = lambda: ('', 204)
app.view_functions['auth.confirm'] = lambda: ('', 204)
| true | true |
1c49c504b6dafe6dbc71503afe31a87262f71392 | 950 | py | Python | noggin/middleware.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | noggin/middleware.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | noggin/middleware.py | mscherer/noggin | 0e3be29de02a1ba7aaf247493c5adf7d08e5f64b | [
"MIT"
] | null | null | null | import python_freeipa
from flask import make_response, render_template
class IPAErrorHandler:
def __init__(self, app, error_template):
self.app = app
self.template = error_template
self.init_app()
def init_app(self):
self.app.wsgi_app = IPAWSGIMiddleware(
self.app.wsgi_app, self.get_error_response
)
def get_error_response(self, error):
self.app.logger.error(f"Uncaught IPA exception: {error}")
return make_response(render_template(self.template, error=error), 500)
class IPAWSGIMiddleware:
def __init__(self, wsgi_app, error_factory):
self.wsgi_app = wsgi_app
self.error_factory = error_factory
def __call__(self, environ, start_response):
try:
return self.wsgi_app(environ, start_response)
except python_freeipa.exceptions.FreeIPAError as e:
return self.error_factory(e)(environ, start_response)
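# Hedged usage sketch (not part of the original module): wiring the handler
# into a Flask app; "error.html" is a placeholder template name.
if __name__ == "__main__":
    from flask import Flask

    demo_app = Flask(__name__)
    IPAErrorHandler(demo_app, error_template="error.html")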
| 30.645161 | 78 | 0.690526 | import python_freeipa
from flask import make_response, render_template
class IPAErrorHandler:
def __init__(self, app, error_template):
self.app = app
self.template = error_template
self.init_app()
def init_app(self):
self.app.wsgi_app = IPAWSGIMiddleware(
self.app.wsgi_app, self.get_error_response
)
def get_error_response(self, error):
self.app.logger.error(f"Uncaught IPA exception: {error}")
return make_response(render_template(self.template, error=error), 500)
class IPAWSGIMiddleware:
def __init__(self, wsgi_app, error_factory):
self.wsgi_app = wsgi_app
self.error_factory = error_factory
def __call__(self, environ, start_response):
try:
return self.wsgi_app(environ, start_response)
except python_freeipa.exceptions.FreeIPAError as e:
return self.error_factory(e)(environ, start_response)
| true | true |
1c49c50fb5c3d93475f03b91a1ed2d767b508c0a | 158 | py | Python | Hello_World/main.py | sostrowski/python | f01ac6f7ca491e10209ce7e3c37647e08d8f90af | [
"MIT"
] | null | null | null | Hello_World/main.py | sostrowski/python | f01ac6f7ca491e10209ce7e3c37647e08d8f90af | [
"MIT"
] | null | null | null | Hello_World/main.py | sostrowski/python | f01ac6f7ca491e10209ce7e3c37647e08d8f90af | [
"MIT"
] | null | null | null | # This program says hello and asks for my name.
print('Hello world!')
print('What is your name?')
myName = input()
print('It is good to meet you, ' + myName)
| 26.333333 | 47 | 0.689873 | print('Hello world!')
print('What is your name?')
myName = input()
print('It is good to meet you, ' + myName)
| true | true |
1c49c5326968324058812cb3a486dd5da42909e9 | 4,645 | py | Python | qa/rpc-tests/test_framework.py | hkaase/TestCoin | 73c647a99e933085ecc04c1d51491eeb44a922a4 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework.py | hkaase/TestCoin | 73c647a99e933085ecc04c1d51491eeb44a922a4 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework.py | hkaase/TestCoin | 73c647a99e933085ecc04c1d51491eeb44a922a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The Testcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-Testcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-Testcoinrpc"))
import shutil
import tempfile
import traceback
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class TestcoinTestFramework(object):
# These may be overridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = start_nodes(4, self.options.tmpdir)
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave Testcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing Testcoind/Testcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
stop_nodes(self.nodes)
wait_Testcoinds()
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| 33.178571 | 105 | 0.613563 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-Testcoinrpc"))
import shutil
import tempfile
import traceback
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class TestcoinTestFramework(object):
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = start_nodes(4, self.options.tmpdir)
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
assert not self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
assert self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave Testcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing Testcoind/Testcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
stop_nodes(self.nodes)
wait_Testcoinds()
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| true | true |
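A sketch of how a concrete test would subclass the framework above; the RPC
calls follow the 2014-era qa/rpc-tests conventions and the test body is
illustrative, not from the source:

class ExampleTest(TestcoinTestFramework):
    def run_test(self):
        # Mine one block on node 0 and check it propagates to all peers.
        self.nodes[0].setgenerate(True, 1)
        self.sync_all()
        for node in self.nodes:
            assert_equal(node.getblockcount(), 201)

if __name__ == '__main__':
    ExampleTest().main()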
1c49c54ee3d9340721f261f16dae2ce932e7cc4a | 5,228 | py | Python | surgeo/scripts/weighted_mean.py | yashd94/surgeo | dc449b7332e143d97321bc844739840c4b0c3666 | [
"MIT"
] | null | null | null | surgeo/scripts/weighted_mean.py | yashd94/surgeo | dc449b7332e143d97321bc844739840c4b0c3666 | [
"MIT"
] | null | null | null | surgeo/scripts/weighted_mean.py | yashd94/surgeo | dc449b7332e143d97321bc844739840c4b0c3666 | [
"MIT"
] | null | null | null | import csv
import math
import itertools
import io
def get_weighted_mean(percentage_index_numbers,
analyzed_subject_index_numbers,
filepath_in,
filepath_out=''):
'''Gives the weighted mean of a particular data set.
Args:
filepath_in: file path of csv from which data is read
filepath_out: file path of csv where data is written.
If blank, return value.
percentage_index_numbers: tuple of index numbers of %s to use.
analyzed_subject_index_numbers: tuple of index numbers to analyze
Returns:
None, or string depending on filepath_out.
Raises:
None
'''
with open(filepath_in, 'rU') as input_csv:
# First pass, get all data and count up
csv_reader = csv.reader(input_csv)
row_1 = next(csv_reader)
# Create number/header index
name_column_index = {index: header for index, header in
enumerate(row_1)}
summed_percentages = {item: float(0) for item in
percentage_index_numbers}
#TODO better RAM usage
validated_data_rows = []
######### First filter out bad or incomplete rows
for index, row in enumerate(csv_reader, start=1):
try:
chained_index = itertools.chain(percentage_index_numbers,
analyzed_subject_index_numbers)
for positional_number in chained_index:
row_item = row[positional_number]
float(row_item)
validated_data_rows.append(row)
except ValueError:
continue
######### Sum totals
for row in validated_data_rows:
for dictionary_key in summed_percentages.keys():
row_value = row[dictionary_key]
summed_percentages[dictionary_key] += float(row_value)
######### Calculate weighted mean for each analyzed subject matter
summary_text = io.StringIO('')
for subject_index_number in analyzed_subject_index_numbers:
# Setup numbers
weighted_mean = {item: float(0) for item in
percentage_index_numbers}
weighted_stdev = {item: float(0) for item in
percentage_index_numbers}
list_of_subject_values = []
# Accumulate weighted mean
for row in validated_data_rows:
for key in weighted_mean.keys():
# row[key] is percentage
# summed_percentages[key] is aggregate percentage
# row[subject_index_number] is subject (e.g. balance, APR)
weighted_mean[key] += (float(row[key]) /
float(summed_percentages[key]) *
float(row[subject_index_number]))
list_of_subject_values.append(float(row[subject_index_number]))
# Accumulate weighted stdev
for row in validated_data_rows:
for key in weighted_mean.keys():
# row[key] is percentage
# summed_percentages[key] is aggregate percentage
# row[subject_index_number] is subject (e.g. balance, APR)
difference = (float(row[subject_index_number]) -
float(weighted_mean[key]))
difference_squared = math.pow(float(difference), 2)
weighted_stdev[key] += math.sqrt((float(row[key]) /
float(summed_percentages[key])
* difference_squared))
sample_mean = sum(list_of_subject_values) / len(list_of_subject_values)
distance_from_mean = [math.pow((value - sample_mean), 2) for value in
list_of_subject_values]
variance = sum(distance_from_mean) / len(list_of_subject_values)
sample_std_dev = math.sqrt(variance)
summary_text.write(''.join(['\n##########\n',
name_column_index[subject_index_number],
'\n##########\n',
'sample mean: ',
str(sample_mean),
'\n',
'sample standard deviation: ',
str(sample_std_dev),
'\n\n']))
for key in weighted_mean.keys():
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted mean: ')
summary_text.write(str(weighted_mean[key]))
summary_text.write(str('\n'))
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted stdev: ')
summary_text.write(str(weighted_stdev[key]))
summary_text.write('\n')
text_output = summary_text.getvalue()
summary_text.close()
if filepath_out == '':
return text_output
else:
with open(filepath_out, 'w+') as f:
f.write(text_output)
| 45.068966 | 79 | 0.547437 | import csv
import math
import itertools
import io
def get_weighted_mean(percentage_index_numbers,
analyzed_subject_index_numbers,
filepath_in,
filepath_out=''):
with open(filepath_in, 'rU') as input_csv:
csv_reader = csv.reader(input_csv)
row_1 = next(csv_reader)
name_column_index = {index: header for index, header in
enumerate(row_1)}
summed_percentages = {item: float(0) for item in
percentage_index_numbers}
validated_data_rows = []
for index, row in enumerate(csv_reader, start=1):
try:
chained_index = itertools.chain(percentage_index_numbers,
analyzed_subject_index_numbers)
for positional_number in chained_index:
row_item = row[positional_number]
float(row_item)
validated_data_rows.append(row)
except ValueError:
continue
for row in validated_data_rows:
for dictionary_key in summed_percentages.keys():
row_value = row[dictionary_key]
summed_percentages[dictionary_key] += float(row_value)
summary_text = io.StringIO('')
for subject_index_number in analyzed_subject_index_numbers:
weighted_mean = {item: float(0) for item in
percentage_index_numbers}
weighted_stdev = {item: float(0) for item in
percentage_index_numbers}
list_of_subject_values = []
for row in validated_data_rows:
for key in weighted_mean.keys():
weighted_mean[key] += (float(row[key]) /
float(summed_percentages[key]) *
float(row[subject_index_number]))
list_of_subject_values.append(float(row[subject_index_number]))
for row in validated_data_rows:
for key in weighted_mean.keys():
difference = (float(row[subject_index_number]) -
float(weighted_mean[key]))
difference_squared = math.pow(float(difference), 2)
weighted_stdev[key] += math.sqrt((float(row[key]) /
float(summed_percentages[key])
* difference_squared))
sample_mean = sum(list_of_subject_values) / len(list_of_subject_values)
distance_from_mean = [math.pow((value - sample_mean), 2) for value in
list_of_subject_values]
variance = sum(distance_from_mean) / len(list_of_subject_values)
sample_std_dev = math.sqrt(variance)
summary_text.write(''.join(['\n##########\n',
name_column_index[subject_index_number],
'\n##########\n',
'sample mean: ',
str(sample_mean),
'\n',
'sample standard deviation: ',
str(sample_std_dev),
'\n\n']))
for key in weighted_mean.keys():
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted mean: ')
summary_text.write(str(weighted_mean[key]))
summary_text.write(str('\n'))
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted stdev: ')
summary_text.write(str(weighted_stdev[key]))
summary_text.write('\n')
text_output = summary_text.getvalue()
summary_text.close()
if filepath_out == '':
return text_output
else:
with open(filepath_out, 'w+') as f:
f.write(text_output)
| true | true |
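An illustrative call of get_weighted_mean; the column indexes and the CSV
path are assumptions. Columns 1 and 2 hold the percentage weights, column 3
holds the values being averaged, and the summary comes back as a string
because no output path is given:

summary = get_weighted_mean(percentage_index_numbers=(1, 2),
                            analyzed_subject_index_numbers=(3,),
                            filepath_in='input.csv')
print(summary)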
1c49c5b532ee2b7117c40d61eb0624ccde59523b | 17,483 | py | Python | qa/common/trace_summary.py | akbargumbira/server | a087c141c62923b61543651eeb5f134806cbaf2d | [
"BSD-3-Clause"
] | null | null | null | qa/common/trace_summary.py | akbargumbira/server | a087c141c62923b61543651eeb5f134806cbaf2d | [
"BSD-3-Clause"
] | null | null | null | qa/common/trace_summary.py | akbargumbira/server | a087c141c62923b61543651eeb5f134806cbaf2d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import sys
import numpy as np
FLAGS = None
def add_span(span_map, timestamps, span_name, ts_start, ts_end):
for tag in (ts_start, ts_end):
if tag not in timestamps:
raise ValueError('timestamps missing "{}": {}'.format(
tag, timestamps))
if timestamps[ts_end] < timestamps[ts_start]:
raise ValueError('end timestamp "{}" < start timestamp "{}"'.format(
ts_end, ts_start))
if span_name not in span_map:
span_map[span_name] = 0
span_map[span_name] += timestamps[ts_end] - timestamps[ts_start]
class AbstractFrontend():
@property
def filter_timestamp(self):
return None
def add_frontend_span(self, span_map, timestamps):
pass
def summarize_frontend_span(self, span_map, cnt):
return None
class HttpFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "HTTP_RECV_START"
def add_frontend_span(self, span_map, timestamps):
if ("HTTP_RECV_START" in timestamps) and ("HTTP_SEND_END"
in timestamps):
add_span(span_map, timestamps, "HTTP_INFER", "HTTP_RECV_START",
"HTTP_SEND_END")
add_span(span_map, timestamps, "HTTP_RECV", "HTTP_RECV_START",
"HTTP_RECV_END")
add_span(span_map, timestamps, "HTTP_SEND", "HTTP_SEND_START",
"HTTP_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "HTTP_INFER" in span_map:
res = "HTTP infer request (avg): {}us\n".format(
span_map["HTTP_INFER"] / (cnt * 1000))
res += "\tReceive (avg): {}us\n".format(span_map["HTTP_RECV"] /
(cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["HTTP_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["HTTP_INFER"] - span_map["REQUEST"] -
span_map["HTTP_RECV"] - span_map["HTTP_SEND"]) / (cnt * 1000))
return res
else:
return None
class GrpcFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "GRPC_WAITREAD_START"
def add_frontend_span(self, span_map, timestamps):
if ("GRPC_WAITREAD_START" in timestamps) and ("GRPC_SEND_END"
in timestamps):
add_span(span_map, timestamps, "GRPC_INFER", "GRPC_WAITREAD_START",
"GRPC_SEND_END")
add_span(span_map, timestamps, "GRPC_WAITREAD",
"GRPC_WAITREAD_START", "GRPC_WAITREAD_END")
add_span(span_map, timestamps, "GRPC_SEND", "GRPC_SEND_START",
"GRPC_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "GRPC_INFER" in span_map:
res = "GRPC infer request (avg): {}us\n".format(
span_map["GRPC_INFER"] / (cnt * 1000))
res += "\tWait/Read (avg): {}us\n".format(
span_map["GRPC_WAITREAD"] / (cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["GRPC_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["GRPC_INFER"] - span_map["REQUEST"] -
span_map["GRPC_WAITREAD"] - span_map["GRPC_SEND"]) /
(cnt * 1000))
return res
else:
return None
def summarize(frontend, traces):
# map from (model_name, model_version) to # of traces
model_count_map = dict()
# map from (model_name, model_version) to map of span->total time
model_span_map = dict()
# Order traces by id to be more intuitive if 'show_trace'
traces = sorted(traces, key=lambda t: t.get('id', -1))
# Filter the trace that is not for the requested frontend
match_frontend_id_set = set()
# Filter the trace that is not meaningful and group them by 'id'
filtered_traces = dict()
for trace in traces:
if "id" not in trace:
continue
# Trace without a parent must contain frontend timestamps
add_trace = False
if "parent_id" not in trace:
if frontend.filter_timestamp is None:
continue
if "timestamps" in trace:
for ts in trace["timestamps"]:
if frontend.filter_timestamp in ts["name"]:
match_frontend_id_set.add(trace["id"])
if trace["id"] in match_frontend_id_set:
add_trace = True
# Otherwise need to check whether parent is filtered
elif trace["parent_id"] in match_frontend_id_set:
match_frontend_id_set.add(trace["id"])
add_trace = True
if add_trace:
if (trace['id'] in filtered_traces.keys()):
rep_trace = filtered_traces[trace['id']]
# Apend the timestamp to the trace representing this 'id'
if "model_name" in trace:
rep_trace["model_name"] = trace["model_name"]
if "model_version" in trace:
rep_trace["model_version"] = trace["model_version"]
if "timestamps" in trace:
rep_trace["timestamps"] += trace["timestamps"]
else:
# Use this trace to represent this 'id'
if "timestamps" not in trace:
trace["timestamps"] = []
filtered_traces[trace['id']] = trace
    # Iterate over a snapshot: entries are popped below, and mutating the
    # dict while iterating its items() view raises RuntimeError in Python 3.
    for trace_id, trace in list(filtered_traces.items()):
if trace_id not in match_frontend_id_set:
filtered_traces.pop(trace_id, None)
continue
timestamps = dict()
for ts in trace["timestamps"]:
timestamps[ts["name"]] = ts["ns"]
if ("REQUEST_START" in timestamps) and ("REQUEST_END" in timestamps):
key = (trace["model_name"], trace["model_version"])
if key not in model_count_map:
model_count_map[key] = 0
model_span_map[key] = dict()
model_count_map[key] += 1
frontend.add_frontend_span(model_span_map[key], timestamps)
add_span(model_span_map[key], timestamps, "REQUEST",
"REQUEST_START", "REQUEST_END")
# The tags below will be missing for ensemble model
if ("QUEUE_START" in timestamps) and ("COMPUTE_START"
in timestamps):
add_span(model_span_map[key], timestamps, "QUEUE",
"QUEUE_START", "COMPUTE_START")
if ("COMPUTE_START" in timestamps) and ("COMPUTE_END"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE",
"COMPUTE_START", "COMPUTE_END")
if ("COMPUTE_INPUT_END" in timestamps) and ("COMPUTE_OUTPUT_START"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE_INPUT",
"COMPUTE_START", "COMPUTE_INPUT_END")
add_span(model_span_map[key], timestamps, "COMPUTE_INFER",
"COMPUTE_INPUT_END", "COMPUTE_OUTPUT_START")
add_span(model_span_map[key], timestamps, "COMPUTE_OUTPUT",
"COMPUTE_OUTPUT_START", "COMPUTE_END")
if FLAGS.show_trace:
print("{} ({}):".format(trace["model_name"],
trace["model_version"]))
print("\tid: {}".format(trace["id"]))
if "parent_id" in trace:
print("\tparent id: {}".format(trace["parent_id"]))
ordered_timestamps = list()
for ts in trace["timestamps"]:
ordered_timestamps.append((ts["name"], ts["ns"]))
ordered_timestamps.sort(key=lambda tup: tup[1])
now = None
for ts in ordered_timestamps:
if now is not None:
print("\t\t{}us".format((ts[1] - now) / 1000))
print("\t{}".format(ts[0]))
now = ts[1]
for key, cnt in model_count_map.items():
model_name, model_value = key
print("Summary for {} ({}): trace count = {}".format(
model_name, model_value, cnt))
frontend_summary = frontend.summarize_frontend_span(
model_span_map[key], cnt)
if frontend_summary is not None:
print(frontend_summary)
# collect handler timeline
print("\tHandler (avg): {}us".format(model_span_map[key]["REQUEST"] /
(cnt * 1000)))
if ("QUEUE"
in model_span_map[key]) and "COMPUTE" in model_span_map[key]:
print("\t\tOverhead (avg): {}us".format(
(model_span_map[key]["REQUEST"] - model_span_map[key]["QUEUE"] -
model_span_map[key]["COMPUTE"]) / (cnt * 1000)))
print("\t\tQueue (avg): {}us".format(model_span_map[key]["QUEUE"] /
(cnt * 1000)))
print("\t\tCompute (avg): {}us".format(
model_span_map[key]["COMPUTE"] / (cnt * 1000)))
if ("COMPUTE_INPUT" in model_span_map[key]
) and "COMPUTE_OUTPUT" in model_span_map[key]:
print("\t\t\tInput (avg): {}us".format(
model_span_map[key]["COMPUTE_INPUT"] / (cnt * 1000)))
print("\t\t\tInfer (avg): {}us".format(
model_span_map[key]["COMPUTE_INFER"] / (cnt * 1000)))
print("\t\t\tOutput (avg): {}us".format(
model_span_map[key]["COMPUTE_OUTPUT"] / (cnt * 1000)))
def summarize_dataflow(traces):
# collect data flow
# - parent input
# - child input
# - ...
# - child output
# Order traces by id to be more intuitive if 'show_trace'
traces = sorted(traces, key=lambda t: t.get('id', -1))
# {3: [4, 5, 6], 4: [7]}
dataflow_parent_map = dict()
for trace in traces:
if "id" not in trace:
continue
if "parent_id" in trace:
if trace["parent_id"] not in dataflow_parent_map:
dataflow_parent_map[trace["parent_id"]] = []
dataflow_parent_map[trace["parent_id"]].append(trace["id"])
if len(dataflow_parent_map) == 0:
# print the tensors of model
first_id = find_first_id_with_tensor(traces)
if first_id != 0:
print("Data Flow:")
print_tensor_by_id(first_id, traces, 0, 0)
return
# print the tensors of ensemble
print("Data Flow:")
first_parent_id = list(dataflow_parent_map.items())[0][0]
# {3: {4: {7: None}, 5: None, 6: None}}
dataflow_tree_map = dict()
depth = [0]
append_dataflow_tensor(dataflow_tree_map,
first_parent_id,
dataflow_parent_map,
traces,
depth)
print_dataflow_tensor(dataflow_tree_map, traces, depth[0], step=0)
def append_dataflow_tensor(dataflow_tensor_map,
parent_id,
dataflow_tree_map,
traces,
depth):
if parent_id not in dataflow_tree_map:
dataflow_tensor_map[parent_id] = None
return
child_tensor_map = dict()
dataflow_tensor_map[parent_id] = child_tensor_map
depth[0] = depth[0] + 1
child_ids = dataflow_tree_map[parent_id]
for child_id in child_ids:
append_dataflow_tensor(child_tensor_map, child_id,
dataflow_tree_map, traces, depth)
def print_dataflow_tensor(dataflow_tree_map, traces, depth, step):
for parent_id in dataflow_tree_map:
print_tensor_by_id(parent_id, traces, depth, step)
if dataflow_tree_map[parent_id] is None:
continue
print_dataflow_tensor(
dataflow_tree_map[parent_id], traces, depth, step+1)
def print_tensor_by_id(id, traces, depth, step):
if id == 0:
return
tabs = "\t"*(step+1)
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
for trace in traces:
# print model name and version
if "id" in trace and "model_name" in trace and "model_version" in trace and "timestamps" in trace and trace["id"] == id:
print("{0}Name: {1}".format(
tabs, trace["model_name"]))
print("{0}Version:{1}".format(
tabs, trace["model_version"]))
# print data
if "id" in trace and "activity" in trace:
if trace["id"] == id and trace["activity"] == "TENSOR_QUEUE_INPUT":
print("{0}{1}:".format(tabs, "QUEUE_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_INPUT":
print("{0}{1}:".format(tabs, "BACKEND_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_OUTPUT":
print("{0}{1}:".format(tabs, "BACKEND_OUTPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
def find_first_id_with_tensor(traces):
for trace in traces:
if "activity" in trace and (trace["activity"] == "TENSOR_QUEUE_INPUT" or trace["activity"] == "TENSOR_BACKEND_INPUT" or trace["activity"] == "TENSOR_BACKEND_OUTPUT"):
return trace["id"]
return 0
TRITON_TYPE_TO_NUMPY = {
"BOOL": bool,
"UINT8": np.uint8,
"UINT16": np.uint16,
"UINT32": np.uint32,
"UINT64": np.uint64,
"INT8": np.int8,
"INT16": np.int16,
"INT32": np.int32,
"INT64": np.int64,
"FP16": np.float16,
"FP32": np.float32,
"FP64": np.float64,
"BYTES": np.object_
}
def get_numpy_array(tensor):
dtype = TRITON_TYPE_TO_NUMPY[tensor["dtype"]]
value = map(float, tensor["data"].split(","))
shape = map(int, tensor["shape"].split(","))
array = np.array(list(value), dtype=dtype)
array = array.reshape(list(shape))
return array
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-t',
'--show-trace',
action="store_true",
required=False,
default=False,
help='Show timestamps for each individual trace')
parser.add_argument('file', type=argparse.FileType('r'), nargs='+')
FLAGS = parser.parse_args()
for f in FLAGS.file:
trace_data = json.loads(f.read())
if FLAGS.verbose:
print(json.dumps(trace_data, sort_keys=True, indent=2))
# Must summarize HTTP and GRPC separately since they have
# different ways of accumulating time.
print("File: {}".format(f.name))
summarize(HttpFrontend(), trace_data)
summarize(GrpcFrontend(), trace_data)
summarize_dataflow(trace_data)
| 40.65814 | 174 | 0.566493 |
import argparse
import json
import sys
import numpy as np
FLAGS = None
def add_span(span_map, timestamps, span_name, ts_start, ts_end):
for tag in (ts_start, ts_end):
if tag not in timestamps:
raise ValueError('timestamps missing "{}": {}'.format(
tag, timestamps))
if timestamps[ts_end] < timestamps[ts_start]:
raise ValueError('end timestamp "{}" < start timestamp "{}"'.format(
ts_end, ts_start))
if span_name not in span_map:
span_map[span_name] = 0
span_map[span_name] += timestamps[ts_end] - timestamps[ts_start]
class AbstractFrontend():
@property
def filter_timestamp(self):
return None
def add_frontend_span(self, span_map, timestamps):
pass
def summarize_frontend_span(self, span_map, cnt):
return None
class HttpFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "HTTP_RECV_START"
def add_frontend_span(self, span_map, timestamps):
if ("HTTP_RECV_START" in timestamps) and ("HTTP_SEND_END"
in timestamps):
add_span(span_map, timestamps, "HTTP_INFER", "HTTP_RECV_START",
"HTTP_SEND_END")
add_span(span_map, timestamps, "HTTP_RECV", "HTTP_RECV_START",
"HTTP_RECV_END")
add_span(span_map, timestamps, "HTTP_SEND", "HTTP_SEND_START",
"HTTP_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "HTTP_INFER" in span_map:
res = "HTTP infer request (avg): {}us\n".format(
span_map["HTTP_INFER"] / (cnt * 1000))
res += "\tReceive (avg): {}us\n".format(span_map["HTTP_RECV"] /
(cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["HTTP_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["HTTP_INFER"] - span_map["REQUEST"] -
span_map["HTTP_RECV"] - span_map["HTTP_SEND"]) / (cnt * 1000))
return res
else:
return None
class GrpcFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "GRPC_WAITREAD_START"
def add_frontend_span(self, span_map, timestamps):
if ("GRPC_WAITREAD_START" in timestamps) and ("GRPC_SEND_END"
in timestamps):
add_span(span_map, timestamps, "GRPC_INFER", "GRPC_WAITREAD_START",
"GRPC_SEND_END")
add_span(span_map, timestamps, "GRPC_WAITREAD",
"GRPC_WAITREAD_START", "GRPC_WAITREAD_END")
add_span(span_map, timestamps, "GRPC_SEND", "GRPC_SEND_START",
"GRPC_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "GRPC_INFER" in span_map:
res = "GRPC infer request (avg): {}us\n".format(
span_map["GRPC_INFER"] / (cnt * 1000))
res += "\tWait/Read (avg): {}us\n".format(
span_map["GRPC_WAITREAD"] / (cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["GRPC_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["GRPC_INFER"] - span_map["REQUEST"] -
span_map["GRPC_WAITREAD"] - span_map["GRPC_SEND"]) /
(cnt * 1000))
return res
else:
return None
def summarize(frontend, traces):
model_count_map = dict()
model_span_map = dict()
traces = sorted(traces, key=lambda t: t.get('id', -1))
match_frontend_id_set = set()
filtered_traces = dict()
for trace in traces:
if "id" not in trace:
continue
add_trace = False
if "parent_id" not in trace:
if frontend.filter_timestamp is None:
continue
if "timestamps" in trace:
for ts in trace["timestamps"]:
if frontend.filter_timestamp in ts["name"]:
match_frontend_id_set.add(trace["id"])
if trace["id"] in match_frontend_id_set:
add_trace = True
elif trace["parent_id"] in match_frontend_id_set:
match_frontend_id_set.add(trace["id"])
add_trace = True
if add_trace:
if (trace['id'] in filtered_traces.keys()):
rep_trace = filtered_traces[trace['id']]
if "model_name" in trace:
rep_trace["model_name"] = trace["model_name"]
if "model_version" in trace:
rep_trace["model_version"] = trace["model_version"]
if "timestamps" in trace:
rep_trace["timestamps"] += trace["timestamps"]
else:
if "timestamps" not in trace:
trace["timestamps"] = []
filtered_traces[trace['id']] = trace
    for trace_id, trace in list(filtered_traces.items()):
if trace_id not in match_frontend_id_set:
filtered_traces.pop(trace_id, None)
continue
timestamps = dict()
for ts in trace["timestamps"]:
timestamps[ts["name"]] = ts["ns"]
if ("REQUEST_START" in timestamps) and ("REQUEST_END" in timestamps):
key = (trace["model_name"], trace["model_version"])
if key not in model_count_map:
model_count_map[key] = 0
model_span_map[key] = dict()
model_count_map[key] += 1
frontend.add_frontend_span(model_span_map[key], timestamps)
add_span(model_span_map[key], timestamps, "REQUEST",
"REQUEST_START", "REQUEST_END")
if ("QUEUE_START" in timestamps) and ("COMPUTE_START"
in timestamps):
add_span(model_span_map[key], timestamps, "QUEUE",
"QUEUE_START", "COMPUTE_START")
if ("COMPUTE_START" in timestamps) and ("COMPUTE_END"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE",
"COMPUTE_START", "COMPUTE_END")
if ("COMPUTE_INPUT_END" in timestamps) and ("COMPUTE_OUTPUT_START"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE_INPUT",
"COMPUTE_START", "COMPUTE_INPUT_END")
add_span(model_span_map[key], timestamps, "COMPUTE_INFER",
"COMPUTE_INPUT_END", "COMPUTE_OUTPUT_START")
add_span(model_span_map[key], timestamps, "COMPUTE_OUTPUT",
"COMPUTE_OUTPUT_START", "COMPUTE_END")
if FLAGS.show_trace:
print("{} ({}):".format(trace["model_name"],
trace["model_version"]))
print("\tid: {}".format(trace["id"]))
if "parent_id" in trace:
print("\tparent id: {}".format(trace["parent_id"]))
ordered_timestamps = list()
for ts in trace["timestamps"]:
ordered_timestamps.append((ts["name"], ts["ns"]))
ordered_timestamps.sort(key=lambda tup: tup[1])
now = None
for ts in ordered_timestamps:
if now is not None:
print("\t\t{}us".format((ts[1] - now) / 1000))
print("\t{}".format(ts[0]))
now = ts[1]
for key, cnt in model_count_map.items():
model_name, model_value = key
print("Summary for {} ({}): trace count = {}".format(
model_name, model_value, cnt))
frontend_summary = frontend.summarize_frontend_span(
model_span_map[key], cnt)
if frontend_summary is not None:
print(frontend_summary)
print("\tHandler (avg): {}us".format(model_span_map[key]["REQUEST"] /
(cnt * 1000)))
if ("QUEUE"
in model_span_map[key]) and "COMPUTE" in model_span_map[key]:
print("\t\tOverhead (avg): {}us".format(
(model_span_map[key]["REQUEST"] - model_span_map[key]["QUEUE"] -
model_span_map[key]["COMPUTE"]) / (cnt * 1000)))
print("\t\tQueue (avg): {}us".format(model_span_map[key]["QUEUE"] /
(cnt * 1000)))
print("\t\tCompute (avg): {}us".format(
model_span_map[key]["COMPUTE"] / (cnt * 1000)))
if ("COMPUTE_INPUT" in model_span_map[key]
) and "COMPUTE_OUTPUT" in model_span_map[key]:
print("\t\t\tInput (avg): {}us".format(
model_span_map[key]["COMPUTE_INPUT"] / (cnt * 1000)))
print("\t\t\tInfer (avg): {}us".format(
model_span_map[key]["COMPUTE_INFER"] / (cnt * 1000)))
print("\t\t\tOutput (avg): {}us".format(
model_span_map[key]["COMPUTE_OUTPUT"] / (cnt * 1000)))
def summarize_dataflow(traces):
traces = sorted(traces, key=lambda t: t.get('id', -1))
dataflow_parent_map = dict()
for trace in traces:
if "id" not in trace:
continue
if "parent_id" in trace:
if trace["parent_id"] not in dataflow_parent_map:
dataflow_parent_map[trace["parent_id"]] = []
dataflow_parent_map[trace["parent_id"]].append(trace["id"])
if len(dataflow_parent_map) == 0:
first_id = find_first_id_with_tensor(traces)
if first_id != 0:
print("Data Flow:")
print_tensor_by_id(first_id, traces, 0, 0)
return
print("Data Flow:")
first_parent_id = list(dataflow_parent_map.items())[0][0]
dataflow_tree_map = dict()
depth = [0]
append_dataflow_tensor(dataflow_tree_map,
first_parent_id,
dataflow_parent_map,
traces,
depth)
print_dataflow_tensor(dataflow_tree_map, traces, depth[0], step=0)
def append_dataflow_tensor(dataflow_tensor_map,
parent_id,
dataflow_tree_map,
traces,
depth):
if parent_id not in dataflow_tree_map:
dataflow_tensor_map[parent_id] = None
return
child_tensor_map = dict()
dataflow_tensor_map[parent_id] = child_tensor_map
depth[0] = depth[0] + 1
child_ids = dataflow_tree_map[parent_id]
for child_id in child_ids:
append_dataflow_tensor(child_tensor_map, child_id,
dataflow_tree_map, traces, depth)
def print_dataflow_tensor(dataflow_tree_map, traces, depth, step):
for parent_id in dataflow_tree_map:
print_tensor_by_id(parent_id, traces, depth, step)
if dataflow_tree_map[parent_id] is None:
continue
print_dataflow_tensor(
dataflow_tree_map[parent_id], traces, depth, step+1)
def print_tensor_by_id(id, traces, depth, step):
if id == 0:
return
tabs = "\t"*(step+1)
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
for trace in traces:
if "id" in trace and "model_name" in trace and "model_version" in trace and "timestamps" in trace and trace["id"] == id:
print("{0}Name: {1}".format(
tabs, trace["model_name"]))
print("{0}Version:{1}".format(
tabs, trace["model_version"]))
if "id" in trace and "activity" in trace:
if trace["id"] == id and trace["activity"] == "TENSOR_QUEUE_INPUT":
print("{0}{1}:".format(tabs, "QUEUE_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_INPUT":
print("{0}{1}:".format(tabs, "BACKEND_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_OUTPUT":
print("{0}{1}:".format(tabs, "BACKEND_OUTPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
def find_first_id_with_tensor(traces):
for trace in traces:
if "activity" in trace and (trace["activity"] == "TENSOR_QUEUE_INPUT" or trace["activity"] == "TENSOR_BACKEND_INPUT" or trace["activity"] == "TENSOR_BACKEND_OUTPUT"):
return trace["id"]
return 0
TRITON_TYPE_TO_NUMPY = {
"BOOL": bool,
"UINT8": np.uint8,
"UINT16": np.uint16,
"UINT32": np.uint32,
"UINT64": np.uint64,
"INT8": np.int8,
"INT16": np.int16,
"INT32": np.int32,
"INT64": np.int64,
"FP16": np.float16,
"FP32": np.float32,
"FP64": np.float64,
"BYTES": np.object_
}
def get_numpy_array(tensor):
dtype = TRITON_TYPE_TO_NUMPY[tensor["dtype"]]
value = map(float, tensor["data"].split(","))
shape = map(int, tensor["shape"].split(","))
array = np.array(list(value), dtype=dtype)
array = array.reshape(list(shape))
return array
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-t',
'--show-trace',
action="store_true",
required=False,
default=False,
help='Show timestamps for each individual trace')
parser.add_argument('file', type=argparse.FileType('r'), nargs='+')
FLAGS = parser.parse_args()
for f in FLAGS.file:
trace_data = json.loads(f.read())
if FLAGS.verbose:
print(json.dumps(trace_data, sort_keys=True, indent=2))
print("File: {}".format(f.name))
summarize(HttpFrontend(), trace_data)
summarize(GrpcFrontend(), trace_data)
summarize_dataflow(trace_data)
| true | true |
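A hand-made trace showing how the helpers above combine; the timestamps are
illustrative rather than real Triton output, and FLAGS is set manually
because summarize() reads it as a module global:

import argparse
FLAGS = argparse.Namespace(show_trace=False)
traces = [{
    "id": 1, "model_name": "simple", "model_version": "1",
    "timestamps": [
        {"name": "HTTP_RECV_START", "ns": 0},
        {"name": "HTTP_RECV_END", "ns": 1000},
        {"name": "REQUEST_START", "ns": 2000},
        {"name": "REQUEST_END", "ns": 9000},
        {"name": "HTTP_SEND_START", "ns": 9500},
        {"name": "HTTP_SEND_END", "ns": 10000}]}]
summarize(HttpFrontend(), traces)  # prints per-model averages in microseconds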
1c49c641c007103fba76421fc78571e90fbff9c1 | 1,284 | py | Python | cpm/code/leastSquareSolver.py | jvc2688/cpm | 409e9ada39fc6238a63a75fb8474a3af70410347 | [
"MIT"
] | 1 | 2015-08-13T19:26:23.000Z | 2015-08-13T19:26:23.000Z | cpm/code/leastSquareSolver.py | jvc2688/cpm | 409e9ada39fc6238a63a75fb8474a3af70410347 | [
"MIT"
] | null | null | null | cpm/code/leastSquareSolver.py | jvc2688/cpm | 409e9ada39fc6238a63a75fb8474a3af70410347 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["linear_least_squares"]
import numpy as np
from scipy import linalg
def linear_least_squares(A, y, yvar=None, l2=None):
"""
Solve a linear system as fast as possible.
:param A: ``(ndata, nbasis)``
The basis matrix.
    :param y: ``(ndata, nout)``
        The observations; pass a single series as ``(ndata, 1)`` so that
        the ``yvar`` broadcasting in the implementation lines up.
:param yvar:
The observational variance of the points ``y``.
:param l2:
The L2 regularization strength. Can be a scalar or a vector (of length
``A.shape[1]``).
"""
# Incorporate the observational uncertainties.
if yvar is not None:
CiA = A / yvar[:, None]
Ciy = y / yvar[:, None]
else:
CiA = A
Ciy = y
# Compute the pre-factor.
AT = A.T
ATA = np.dot(AT, CiA)
# Incorporate any L2 regularization.
if l2 is not None:
if np.isscalar(l2):
l2 = l2 + np.zeros(A.shape[1])
ATA[np.diag_indices_from(ATA)] += l2
# Solve the equations overwriting the temporary arrays for speed.
factor = linalg.cho_factor(ATA, overwrite_a=True)
return linalg.cho_solve(factor, np.dot(AT, Ciy), overwrite_b=True) | 26.204082 | 78 | 0.57243 |
from __future__ import division, print_function
__all__ = ["linear_least_squares"]
import numpy as np
from scipy import linalg
def linear_least_squares(A, y, yvar=None, l2=None):
if yvar is not None:
CiA = A / yvar[:, None]
Ciy = y / yvar[:, None]
else:
CiA = A
Ciy = y
AT = A.T
ATA = np.dot(AT, CiA)
if l2 is not None:
if np.isscalar(l2):
l2 = l2 + np.zeros(A.shape[1])
ATA[np.diag_indices_from(ATA)] += l2
factor = linalg.cho_factor(ATA, overwrite_a=True)
return linalg.cho_solve(factor, np.dot(AT, Ciy), overwrite_b=True) | true | true |
1c49c75a54956fe7a7977bc79452b0fbce941fc1 | 157 | py | Python | solution/lcp06.py | sth4nothing/pyleetcode | 70ac2dc55b0cbcd243b38103a96dd796538a3c05 | [
"MIT"
] | null | null | null | solution/lcp06.py | sth4nothing/pyleetcode | 70ac2dc55b0cbcd243b38103a96dd796538a3c05 | [
"MIT"
] | null | null | null | solution/lcp06.py | sth4nothing/pyleetcode | 70ac2dc55b0cbcd243b38103a96dd796538a3c05 | [
"MIT"
] | null | null | null | import math
from typing import List
class Solution:
    def minCount(self, coins: List[int]) -> int:
        # At most two coins may be taken from a pile per move, so each
        # pile of size c costs ceil(c / 2) moves.
        return sum(map(lambda c: math.ceil(c / 2), coins))
| 26.166667 | 55 | 0.681529 | import math
from typing import List
class Solution:
def minCount(self, coins: List[int]) -> int:
return sum(map(lambda c:math.ceil(c/2), coins))
| true | true |
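A worked example for the solution above: at most two coins are taken per
move, so pile c costs ceil(c / 2) moves and [4, 2, 1] needs 2 + 1 + 1 = 4:

print(Solution().minCount([4, 2, 1]))  # -> 4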
1c49c954463613879c282f1b71d006111054bf6c | 4,568 | py | Python | pype/plugins/maya/publish/validate_muster_connection.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | pype/plugins/maya/publish/validate_muster_connection.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | pype/plugins/maya/publish/validate_muster_connection.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | import os
import json
import appdirs
import pyblish.api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
import pype.api
import pype.maya.action
class ValidateMusterConnection(pyblish.api.ContextPlugin):
"""
Validate Muster REST API Service is running and we have valid auth token
"""
label = "Validate Muster REST API Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["renderlayer"]
token = None
if not os.environ.get("MUSTER_REST_URL"):
active = False
actions = [pype.api.RepairAction]
def process(self, context):
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
        # Test whether we have the environment set (redundant, as this
        # plugin shouldn't be active otherwise).
try:
MUSTER_REST_URL = os.environ["MUSTER_REST_URL"]
except KeyError:
self.log.error("Muster REST API url not found.")
raise ValueError("Muster REST API url not found.")
# Load credentials
try:
self._load_credentials()
except RuntimeError:
self.log.error("invalid or missing access token")
assert self._token is not None, "Invalid or missing token"
# We have token, lets do trivial query to web api to see if we can
# connect and access token is valid.
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER`,
`MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from presets.
.. todo::
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
muster_json = json.load(file)
self._token = muster_json.get('token', None)
if not self._token:
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
@classmethod
def repair(cls, instance):
"""
Renew authentication token by logging into Muster
"""
api_url = "{}/muster/show_login".format(
os.environ["PYPE_REST_API_URL"])
cls.log.debug(api_url)
response = cls._requests_post(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.get(*args, **kwargs)
| 37.442623 | 97 | 0.637916 | import os
import json
import appdirs
import pyblish.api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
import pype.api
import pype.maya.action
class ValidateMusterConnection(pyblish.api.ContextPlugin):
label = "Validate Muster REST API Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["renderlayer"]
token = None
if not os.environ.get("MUSTER_REST_URL"):
active = False
actions = [pype.api.RepairAction]
def process(self, context):
if not contextplugin_should_run(self, context):
return
try:
MUSTER_REST_URL = os.environ["MUSTER_REST_URL"]
except KeyError:
self.log.error("Muster REST API url not found.")
raise ValueError("Muster REST API url not found.")
# Load credentials
try:
self._load_credentials()
except RuntimeError:
self.log.error("invalid or missing access token")
assert self._token is not None, "Invalid or missing token"
# We have token, lets do trivial query to web api to see if we can
# connect and access token is valid.
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
def _load_credentials(self):
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
muster_json = json.load(file)
self._token = muster_json.get('token', None)
if not self._token:
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
@classmethod
def repair(cls, instance):
api_url = "{}/muster/show_login".format(
os.environ["PYPE_REST_API_URL"])
cls.log.debug(api_url)
response = cls._requests_post(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
def _requests_post(self, *args, **kwargs):
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.get(*args, **kwargs)
| true | true |
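A sketch of producing the credentials file that _load_credentials expects;
the token value is a placeholder and the path mirrors the appdirs lookup
above:

import os, json, appdirs
app_dir = appdirs.user_data_dir('pype-app', 'pype')
os.makedirs(app_dir, exist_ok=True)
with open(os.path.join(app_dir, 'muster_cred.json'), 'w') as f:
    json.dump({'token': 'PLACEHOLDER-TOKEN'}, f)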
1c49c9837d339902372100015afa8dd09aa825df | 718 | py | Python | tests/main.py | deeso/json-search-replace | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | 1 | 2019-02-08T14:42:45.000Z | 2019-02-08T14:42:45.000Z | tests/main.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | tests/main.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
parser = argparse.ArgumentParser(
description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| 26.592593 | 71 | 0.721448 | from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
parser = argparse.ArgumentParser(
description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| true | true |
1c49c9dd5478932c655374fad541acbfc8952eeb | 2,650 | py | Python | deploy/utils/predictor.py | Sibo2rr/PaddleClas | b575e002cde44631b2dfc6333f4cfe43f0d0fc81 | [
"Apache-2.0"
] | 3 | 2021-12-16T06:59:04.000Z | 2021-12-16T06:59:24.000Z | deploy/utils/predictor.py | hello3281/PaddleClas | 8103f010c75ce4b4bee51ede8d057da4c6bd446a | [
"Apache-2.0"
] | null | null | null | deploy/utils/predictor.py | hello3281/PaddleClas | 8103f010c75ce4b4bee51ede8d057da4c6bd446a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import base64
import shutil
import cv2
import numpy as np
from paddle.inference import Config
from paddle.inference import create_predictor
class Predictor(object):
def __init__(self, args, inference_model_dir=None):
# HALF precission predict only work when using tensorrt
if args.use_fp16 is True:
assert args.use_tensorrt is True
self.args = args
self.paddle_predictor, self.config = self.create_paddle_predictor(
args, inference_model_dir)
def predict(self, image):
raise NotImplementedError
def create_paddle_predictor(self, args, inference_model_dir=None):
if inference_model_dir is None:
inference_model_dir = args.inference_model_dir
params_file = os.path.join(inference_model_dir, "inference.pdiparams")
model_file = os.path.join(inference_model_dir, "inference.pdmodel")
config = Config(model_file, params_file)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
else:
config.disable_gpu()
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_num_threads)
if args.enable_profile:
config.enable_profile()
config.disable_glog_info()
config.switch_ir_optim(args.ir_optim) # default true
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=Config.Precision.Half
if args.use_fp16 else Config.Precision.Float32,
max_batch_size=args.batch_size,
workspace_size=1 << 30,
min_subgraph_size=30)
config.enable_memory_optim()
# use zero copy
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
return predictor, config
| 36.805556 | 78 | 0.689434 | import os
import argparse
import base64
import shutil
import cv2
import numpy as np
from paddle.inference import Config
from paddle.inference import create_predictor
class Predictor(object):
def __init__(self, args, inference_model_dir=None):
if args.use_fp16 is True:
assert args.use_tensorrt is True
self.args = args
self.paddle_predictor, self.config = self.create_paddle_predictor(
args, inference_model_dir)
def predict(self, image):
raise NotImplementedError
def create_paddle_predictor(self, args, inference_model_dir=None):
if inference_model_dir is None:
inference_model_dir = args.inference_model_dir
params_file = os.path.join(inference_model_dir, "inference.pdiparams")
model_file = os.path.join(inference_model_dir, "inference.pdmodel")
config = Config(model_file, params_file)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
else:
config.disable_gpu()
if args.enable_mkldnn:
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_num_threads)
if args.enable_profile:
config.enable_profile()
config.disable_glog_info()
        config.switch_ir_optim(args.ir_optim)
        if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=Config.Precision.Half
if args.use_fp16 else Config.Precision.Float32,
max_batch_size=args.batch_size,
workspace_size=1 << 30,
min_subgraph_size=30)
config.enable_memory_optim()
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
return predictor, config
| true | true |
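An illustrative construction of the predictor above; the namespace mirrors
the flags the class reads, and the model directory is a placeholder that
must contain inference.pdmodel and inference.pdiparams:

import argparse
args = argparse.Namespace(
    use_fp16=False, use_tensorrt=False, use_gpu=False, gpu_mem=8000,
    enable_mkldnn=False, cpu_num_threads=10, enable_profile=False,
    ir_optim=True, batch_size=1, inference_model_dir="./inference")
predictor = Predictor(args)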
1c49ca0ede398431f0496d09c172189ae299c254 | 5,375 | py | Python | pinax/apps/basic_profiles/views.py | skabber/pinax | 6fdee6b7bbbb597074d45122badf3a6dd75e0b92 | [
"MIT"
] | 2 | 2015-12-27T23:07:51.000Z | 2016-05-09T08:57:28.000Z | pinax/apps/basic_profiles/views.py | SMiGL/pinax | d08b2655fe661566bd13c5c170b1a4cad9e67a1d | [
"MIT"
] | null | null | null | pinax/apps/basic_profiles/views.py | SMiGL/pinax | d08b2655fe661566bd13c5c170b1a4cad9e67a1d | [
"MIT"
] | null | null | null | from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
#from friends.forms import InviteFriendForm
#from friends.models import FriendshipInvitation, Friendship
from basic_profiles.models import Profile
from basic_profiles.forms import ProfileForm
# # used by friend autocompletion
# from gravatar.templatetags.gravatar import gravatar
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
def profiles(request, template_name="basic_profiles/profiles.html"):
return render_to_response(template_name, {
"users": User.objects.all().order_by("-date_joined"),
}, context_instance=RequestContext(request))
def profile(request, username, template_name="basic_profiles/profile.html"):
other_user = get_object_or_404(User, username=username)
if request.user.is_authenticated():
# is_friend = Friendship.objects.are_friends(request.user, other_user)
# other_friends = Friendship.objects.friends_for_user(other_user)
if request.user == other_user:
is_me = True
else:
is_me = False
else:
# other_friends = []
# is_friend = False
is_me = False
# if is_friend:
# invite_form = None
# previous_invitations_to = None
# previous_invitations_from = None
# else:
# if request.user.is_authenticated() and request.method == "POST":
# if request.POST["action"] == "invite":
# invite_form = InviteFriendForm(request.user, request.POST)
# if invite_form.is_valid():
# invite_form.save()
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
# })
# if request.POST["action"] == "accept": # @@@ perhaps the form should just post to friends and be redirected here
# invitation_id = request.POST["invitation"]
# try:
# invitation = FriendshipInvitation.objects.get(id=invitation_id)
# if invitation.to_user == request.user:
# invitation.accept()
# request.user.message_set.create(message=_("You have accepted the friendship request from %(from_user)s") % {'from_user': invitation.from_user})
# is_friend = True
# other_friends = Friendship.objects.friends_for_user(other_user)
# except FriendshipInvitation.DoesNotExist:
# pass
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
# })
# previous_invitations_to = FriendshipInvitation.objects.filter(to_user=other_user, from_user=request.user)
# previous_invitations_from = FriendshipInvitation.objects.filter(to_user=request.user, from_user=other_user)
if is_me:
if request.method == "POST":
if request.POST["action"] == "update":
profile_form = ProfileForm(request.POST, instance=other_user.get_profile())
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = other_user
profile.save()
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = None
return render_to_response(template_name, {
"profile_form": profile_form,
"is_me": is_me,
# "is_friend": is_friend,
"other_user": other_user,
# "other_friends": other_friends,
# "invite_form": invite_form,
# "previous_invitations_to": previous_invitations_to,
# "previous_invitations_from": previous_invitations_from,
}, context_instance=RequestContext(request))
# def username_autocomplete(request):
# if request.user.is_authenticated():
# q = request.GET.get("q")
# friends = Friendship.objects.friends_for_user(request.user)
# content = []
# for friendship in friends:
# if friendship["friend"].username.lower().startswith(q):
# try:
# profile = friendship["friend"].get_profile()
# entry = "%s,,%s,,%s" % (
# gravatar(friendship["friend"], 40),
# friendship["friend"].username,
# profile.location
# )
# except Profile.DoesNotExist:
# pass
# content.append(entry)
# response = HttpResponse("\n".join(content))
# else:
# response = HttpResponseForbidden()
# setattr(response, "djangologging.suppress_output", True)
# return response
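# Hedged sketch of an era-appropriate urlconf for the two live views above
# (this is not part of basic_profiles/views.py; the regexes and URL names are
# illustrative assumptions in the old patterns() style these views predate
# modern Django with):
# from django.conf.urls.defaults import *
#
# urlpatterns = patterns('basic_profiles.views',
#     url(r'^$', 'profiles', name='profiles_list'),
#     url(r'^profile/(?P<username>[\w\._-]+)/$', 'profile', name='profiles_profile'),
# )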
| 43 | 172 | 0.614512 | from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from basic_profiles.models import Profile
from basic_profiles.forms import ProfileForm
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
def profiles(request, template_name="basic_profiles/profiles.html"):
return render_to_response(template_name, {
"users": User.objects.all().order_by("-date_joined"),
}, context_instance=RequestContext(request))
def profile(request, username, template_name="basic_profiles/profile.html"):
other_user = get_object_or_404(User, username=username)
if request.user.is_authenticated():
if request.user == other_user:
is_me = True
else:
is_me = False
else:
is_me = False
# })
# if request.POST["action"] == "accept": # @@@ perhaps the form should just post to friends and be redirected here
# invitation_id = request.POST["invitation"]
# try:
# invitation = FriendshipInvitation.objects.get(id=invitation_id)
# if invitation.to_user == request.user:
# invitation.accept()
# request.user.message_set.create(message=_("You have accepted the friendship request from %(from_user)s") % {'from_user': invitation.from_user})
# is_friend = True
# other_friends = Friendship.objects.friends_for_user(other_user)
# except FriendshipInvitation.DoesNotExist:
# pass
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
if is_me:
if request.method == "POST":
if request.POST["action"] == "update":
profile_form = ProfileForm(request.POST, instance=other_user.get_profile())
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = other_user
profile.save()
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = None
return render_to_response(template_name, {
"profile_form": profile_form,
"is_me": is_me,
"other_user": other_user,
}, context_instance=RequestContext(request))
| true | true |
1c49ca865c275afff6e3b397b4fb7f0c5ba2036e | 1,962 | py | Python | mysite/polls/views.py | allentv/pycon-django-workshop | 931c3b672882616355053f1d84432ecaacddfbfc | [
"MIT"
] | null | null | null | mysite/polls/views.py | allentv/pycon-django-workshop | 931c3b672882616355053f1d84432ecaacddfbfc | [
"MIT"
] | null | null | null | mysite/polls/views.py | allentv/pycon-django-workshop | 931c3b672882616355053f1d84432ecaacddfbfc | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls_index.html', context)
# class IndexView(generic.ListView):
# template_name = 'polls_index.html'
# context_object_name = 'latest_question_list'
# def get_queryset(self):
# return Question.objects.order_by('-pub_date')[:5]
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_detail.html', {'question': question})
class DetailView(generic.DetailView):
model = Question
template_name = 'polls_detail.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls_detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_results.html', {'question': question})
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls_results.html'
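# Hedged sketch of the polls/urls.py these views assume (not part of this
# file; the 'polls' namespace and the 'results' name follow the reverse() call
# in vote(), while the route shapes are illustrative):
# from django.urls import path
# from . import views
#
# app_name = 'polls'
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('<int:pk>/', views.DetailView.as_view(), name='detail'),
#     path('<int:question_id>/vote/', views.vote, name='vote'),
#     path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ]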
| 33.827586 | 82 | 0.705403 | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls_index.html', context)
class DetailView(generic.DetailView):
model = Question
template_name = 'polls_detail.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls_detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_results.html', {'question': question})
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls_results.html'
| true | true |
1c49cadc1ab18e9d26cbe65b1388e424ceee96a2 | 3,204 | py | Python | calcrepo/calcpkg.py | TC01/calcpkg | 5168f606264620a090b42a64354331d208b00d5f | [
"MIT"
] | 3 | 2015-02-12T09:01:25.000Z | 2022-03-16T12:48:38.000Z | calcrepo/calcpkg.py | TC01/calcpkg | 5168f606264620a090b42a64354331d208b00d5f | [
"MIT"
] | 9 | 2015-02-12T03:41:16.000Z | 2018-07-05T22:12:34.000Z | calcrepo/calcpkg.py | TC01/calcpkg | 5168f606264620a090b42a64354331d208b00d5f | [
"MIT"
] | 1 | 2018-06-23T23:29:13.000Z | 2018-06-23T23:29:13.000Z | #!/usr/bin/env python
#Dependencies
import argparse
import os
import sys
#Import the necessary function from repos subpackage
from repos import createRepoObjects
#Main function of script... yeah.
def main():
"""Core function for the script"""
commands = ['update', 'list', 'get', 'info', 'count', 'search', 'download']
parser = argparse.ArgumentParser(description="Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech")
parser.add_argument("action", metavar="ACTION", type=str, help="The calcpkg command to execute (count, get, info, list, update)")
parser.add_argument("string", metavar="STRING", type=str, help="The string to search for when using count, get, info, or list commands", nargs="?", default="")
parser.add_argument("-c", "--category", dest="category", help="Limit searching to a specified category", default="")
parser.add_argument("-e", "--extension", dest="extension", help="Limit searching to a specified file extension", default="")
parser.add_argument("-f", "--filename", dest="searchFiles", action="store_true", help="Search by archive filenames rather than descriptive package name")
parser.add_argument("-g", "--game", dest="game", action="store_true", help="Limit searching to games only")
parser.add_argument("-m", "--math", dest="math", action="store_true", help="Limit searching to math and science programs only")
parser.add_argument("-r", "--repository", dest="repo", help="Limit searching by one repository- default is to use all", default="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Always provide verbose output")
parser.add_argument("-x", "--extract", dest="extract", action="store_true", help="After downloading, autoextract archive files when possible")
parser.add_argument("-y", "--assume-yes", dest="prompt", action="store_false", help="Never prompt for verification of command")
args = parser.parse_args()
#Verify that a valid command was specified
if not args.action in commands:
print "Error: Invalid action specified, action must be one of " + str(commands)
return
#args.category is special
if args.category != "":
category = "/" + args.category + "/"
else:
category = ""
    #Initialize repositories; all behind-the-scenes processing is done by plugins in calcrepo.repos
repositories = createRepoObjects()
if args.repo != "":
for repoName, repository in repositories.iteritems():
if repoName != args.repo:
repositories[repoName] = None
#Now, run commands for each repo
for name, repository in repositories.iteritems():
if repository != None:
repository.setRepoData(args.string, category, args.extension, args.math, args.game, args.searchFiles)
if args.action == "update":
repository.updateRepoIndexes(args.verbose)
elif (args.action == "list" or args.action == "search"):
repository.searchIndex()
elif (args.action == "get" or args.action == "download"):
repository.searchIndex()
repository.downloadFiles(args.prompt, args.extract)
elif args.action == "info":
repository.getFileInfos()
elif args.action == "count":
repository.countIndex()
if __name__ == '__main__':
main()
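# Hedged usage examples (actions and flags as defined by the parser above;
# the search strings and the repository name are illustrative assumptions):
#   python calcpkg.py update
#   python calcpkg.py search tetris -g
#   python calcpkg.py get tetris -r ticalc -y -x
#   python calcpkg.py count -e zip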
| 48.545455 | 160 | 0.719725 |
import argparse
import os
import sys
from repos import createRepoObjects
def main():
"""Core function for the script"""
commands = ['update', 'list', 'get', 'info', 'count', 'search', 'download']
parser = argparse.ArgumentParser(description="Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech")
parser.add_argument("action", metavar="ACTION", type=str, help="The calcpkg command to execute (count, get, info, list, update)")
parser.add_argument("string", metavar="STRING", type=str, help="The string to search for when using count, get, info, or list commands", nargs="?", default="")
parser.add_argument("-c", "--category", dest="category", help="Limit searching to a specified category", default="")
parser.add_argument("-e", "--extension", dest="extension", help="Limit searching to a specified file extension", default="")
parser.add_argument("-f", "--filename", dest="searchFiles", action="store_true", help="Search by archive filenames rather than descriptive package name")
parser.add_argument("-g", "--game", dest="game", action="store_true", help="Limit searching to games only")
parser.add_argument("-m", "--math", dest="math", action="store_true", help="Limit searching to math and science programs only")
parser.add_argument("-r", "--repository", dest="repo", help="Limit searching by one repository- default is to use all", default="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Always provide verbose output")
parser.add_argument("-x", "--extract", dest="extract", action="store_true", help="After downloading, autoextract archive files when possible")
parser.add_argument("-y", "--assume-yes", dest="prompt", action="store_false", help="Never prompt for verification of command")
args = parser.parse_args()
if not args.action in commands:
print "Error: Invalid action specified, action must be one of " + str(commands)
return
if args.category != "":
category = "/" + args.category + "/"
else:
category = ""
repositories = createRepoObjects()
if args.repo != "":
for repoName, repository in repositories.iteritems():
if repoName != args.repo:
repositories[repoName] = None
for name, repository in repositories.iteritems():
if repository != None:
repository.setRepoData(args.string, category, args.extension, args.math, args.game, args.searchFiles)
if args.action == "update":
repository.updateRepoIndexes(args.verbose)
elif (args.action == "list" or args.action == "search"):
repository.searchIndex()
elif (args.action == "get" or args.action == "download"):
repository.searchIndex()
repository.downloadFiles(args.prompt, args.extract)
elif args.action == "info":
repository.getFileInfos()
elif args.action == "count":
repository.countIndex()
if __name__ == '__main__':
main()
| false | true |
1c49cadd0256d125b86d00b3f7ba4dc0283c375c | 3,458 | py | Python | torchio/data/inference/aggregator.py | Jimmy2027/torchio | 98e5f4f379e877fa20c49f93645a3d0e0834f650 | [
"MIT"
] | null | null | null | torchio/data/inference/aggregator.py | Jimmy2027/torchio | 98e5f4f379e877fa20c49f93645a3d0e0834f650 | [
"MIT"
] | null | null | null | torchio/data/inference/aggregator.py | Jimmy2027/torchio | 98e5f4f379e877fa20c49f93645a3d0e0834f650 | [
"MIT"
] | null | null | null | from typing import Tuple
import torch
import numpy as np
from ...utils import to_tuple
from ...torchio import TypeData, TypeTuple
from ..subject import Subject
class GridAggregator:
r"""Aggregate patches for dense inference.
This class is typically used to build a volume made of batches after
inference of patches extracted by a :py:class:`~torchio.data.GridSampler`.
Args:
sample: Instance of:py:class:`~torchio.data.subject.Subject`
from which patches will be extracted (probably using a
:py:class:`~torchio.data.GridSampler`).
patch_overlap: Tuple of integers :math:`(d_o, h_o, w_o)` specifying the
overlap between patches. If a single number
:math:`n` is provided, :math:`d_o = h_o = w_o = n`.
out_channels: Number of channels in the output tensor.
.. note:: Adapted from NiftyNet. See `this NiftyNet tutorial
<https://niftynet.readthedocs.io/en/dev/window_sizes.html>`_ for more
information.
"""
def __init__(
self,
sample: Subject,
patch_overlap: TypeTuple,
out_channels: int = 1,
):
self._output_tensor = torch.zeros(out_channels, *sample.shape)
self.patch_overlap = to_tuple(patch_overlap, length=3)
@staticmethod
def _crop_batch(
patches: torch.Tensor,
location: np.ndarray,
border: Tuple[int, int, int],
) -> Tuple[TypeData, np.ndarray]:
location = location.astype(np.int)
batch_shape = patches.shape
spatial_shape = batch_shape[2:] # ignore batch and channels dim
num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
cropped_shape = np.max(location[:, 3:6] - location[:, 0:3], axis=0)
diff = spatial_shape - cropped_shape
left = np.floor(diff / 2).astype(np.int)
i_ini, j_ini, k_ini = left
i_fin, j_fin, k_fin = left + cropped_shape
batch = patches[
:, # batch dimension
:, # channels dimension
i_ini:i_fin,
j_ini:j_fin,
k_ini:k_fin,
]
return batch, location
def _ensure_output_dtype(self, tensor: torch.Tensor) -> None:
"""Make sure the output tensor type is the same as the input patches."""
if self._output_tensor.dtype != tensor.dtype:
self._output_tensor = self._output_tensor.type(tensor.dtype)
def add_batch(self, patches: torch.Tensor, locations: TypeData) -> None:
patches = patches.cpu()
self._ensure_output_dtype(patches)
location_init = np.copy(locations)
init_ones = np.ones_like(patches)
patches, _ = self._crop_batch(
patches, location_init, self.patch_overlap)
location_init = np.copy(locations)
_, locations = self._crop_batch(
init_ones, location_init, self.patch_overlap)
for patch, location in zip(patches, locations):
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
channels = len(patch)
for channel in range(channels):
self._output_tensor[channel, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch[channel]
def get_output_tensor(self) -> torch.Tensor:
return self._output_tensor
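# Hedged usage sketch (sampler, loader and model are assumptions, not defined
# in this module; it only illustrates the add_batch/get_output_tensor flow):
#     sampler = GridSampler(sample, patch_size, patch_overlap)
#     aggregator = GridAggregator(sample, patch_overlap, out_channels=2)
#     for patches, locations in loader:   # patches: (N, C, D, H, W)
#         aggregator.add_batch(model(patches), locations)
#     prediction = aggregator.get_output_tensor()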
| 39.747126 | 100 | 0.622325 | from typing import Tuple
import torch
import numpy as np
from ...utils import to_tuple
from ...torchio import TypeData, TypeTuple
from ..subject import Subject
class GridAggregator:
def __init__(
self,
sample: Subject,
patch_overlap: TypeTuple,
out_channels: int = 1,
):
self._output_tensor = torch.zeros(out_channels, *sample.shape)
self.patch_overlap = to_tuple(patch_overlap, length=3)
@staticmethod
def _crop_batch(
patches: torch.Tensor,
location: np.ndarray,
border: Tuple[int, int, int],
) -> Tuple[TypeData, np.ndarray]:
location = location.astype(np.int)
batch_shape = patches.shape
        spatial_shape = batch_shape[2:]
        num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
cropped_shape = np.max(location[:, 3:6] - location[:, 0:3], axis=0)
diff = spatial_shape - cropped_shape
left = np.floor(diff / 2).astype(np.int)
i_ini, j_ini, k_ini = left
i_fin, j_fin, k_fin = left + cropped_shape
batch = patches[
:, :, i_ini:i_fin,
j_ini:j_fin,
k_ini:k_fin,
]
return batch, location
def _ensure_output_dtype(self, tensor: torch.Tensor) -> None:
if self._output_tensor.dtype != tensor.dtype:
self._output_tensor = self._output_tensor.type(tensor.dtype)
def add_batch(self, patches: torch.Tensor, locations: TypeData) -> None:
patches = patches.cpu()
self._ensure_output_dtype(patches)
location_init = np.copy(locations)
init_ones = np.ones_like(patches)
patches, _ = self._crop_batch(
patches, location_init, self.patch_overlap)
location_init = np.copy(locations)
_, locations = self._crop_batch(
init_ones, location_init, self.patch_overlap)
for patch, location in zip(patches, locations):
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
channels = len(patch)
for channel in range(channels):
self._output_tensor[channel, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch[channel]
def get_output_tensor(self) -> torch.Tensor:
return self._output_tensor
| true | true |
1c49cb658dbcd25048b0ba1ab66c8574e990ca81 | 1,086 | py | Python | test/terra/pulse/de/__init__.py | sagarpahwa/qiskit-aer | 77e40c8d99fd0490d85285e96f87e4905017b646 | [
"Apache-2.0"
] | 313 | 2018-12-19T09:19:12.000Z | 2022-03-21T18:15:41.000Z | test/terra/pulse/de/__init__.py | sagarpahwa/qiskit-aer | 77e40c8d99fd0490d85285e96f87e4905017b646 | [
"Apache-2.0"
] | 933 | 2018-12-21T02:56:49.000Z | 2022-03-30T01:19:54.000Z | test/terra/pulse/de/__init__.py | sagarpahwa/qiskit-aer | 77e40c8d99fd0490d85285e96f87e4905017b646 | [
"Apache-2.0"
] | 313 | 2018-12-19T14:52:55.000Z | 2022-02-28T20:20:14.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
'''
Terra tests
'''
import os
def load_tests(loader, standard_tests, pattern):
"""
test suite for unittest discovery
"""
this_dir = os.path.dirname(__file__)
if pattern in ['test*.py', '*_test.py']:
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
elif pattern in ['profile*.py', '*_profile.py']:
loader.testMethodPrefix = 'profile'
package_tests = loader.discover(start_dir=this_dir, pattern='test*.py')
standard_tests.addTests(package_tests)
return standard_tests
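# Hedged usage note: the load_tests protocol above is invoked automatically by
# unittest discovery, e.g.
#   python -m unittest discover -s test/terra/pulse/de -p "test*.py"
# while passing a profile pattern routes through the 'profile' method-prefix
# branch:
#   python -m unittest discover -s test/terra/pulse/de -p "profile*.py"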
| 32.909091 | 79 | 0.714549 |
import os
def load_tests(loader, standard_tests, pattern):
this_dir = os.path.dirname(__file__)
if pattern in ['test*.py', '*_test.py']:
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
elif pattern in ['profile*.py', '*_profile.py']:
loader.testMethodPrefix = 'profile'
package_tests = loader.discover(start_dir=this_dir, pattern='test*.py')
standard_tests.addTests(package_tests)
return standard_tests
| true | true |
1c49cba02d177d2ba601cb3a59b70360782c086a | 174,750 | py | Python | DungeonGenerator.py | JaxxyIV/ALttPDoorRandomizer | bbad1d1d8b1020b50453b66b2d88c5fb8712be38 | [
"MIT"
] | 42 | 2019-08-22T16:19:51.000Z | 2022-03-30T17:39:39.000Z | DungeonGenerator.py | JaxxyIV/ALttPDoorRandomizer | bbad1d1d8b1020b50453b66b2d88c5fb8712be38 | [
"MIT"
] | 48 | 2019-09-04T22:47:03.000Z | 2022-01-13T22:16:13.000Z | DungeonGenerator.py | JaxxyIV/ALttPDoorRandomizer | bbad1d1d8b1020b50453b66b2d88c5fb8712be38 | [
"MIT"
] | 35 | 2020-01-10T09:12:53.000Z | 2022-03-23T08:22:25.000Z | import RaceRandom as random
import collections
import itertools
from collections import defaultdict, deque
from functools import reduce
import logging
import math
import operator as op
import time
from typing import List
from BaseClasses import DoorType, Direction, CrystalBarrier, RegionType, Polarity, PolSlot, flooded_keys, Sector
from BaseClasses import Hook, hook_from_door
from Regions import dungeon_events, flooded_keys_reverse
from Dungeons import dungeon_regions, split_region_starts
from RoomData import DoorKind
class GraphPiece:
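    # Summary of one exploration pass from a hanging door (or from the dungeon
    # entrances, for the 'Origin' piece): the regions it can reach, the
    # unlinked doors it exposes as hooks with their crystal-barrier state,
    # candidate big key locations, and whether the Skull Pinball trap door had
    # to be treated as passable.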
def __init__(self):
self.hanger_info = None
self.hanger_crystal = None
self.hooks = {}
self.visited_regions = set()
self.possible_bk_locations = set()
self.pinball_used = False
# Dungeons shouldn't be generated until all entrances are appropriately accessible
def pre_validate(builder, entrance_region_names, split_dungeon, world, player):
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
proposed_map = {}
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
paths = determine_paths_for_dungeon(world, player, all_regions, builder.name)
dungeon, hangers, hooks = gen_dungeon_info(builder.name, builder.sectors, entrance_regions, all_regions,
proposed_map, doors_to_connect, bk_needed, bk_special, world, player)
return check_valid(builder.name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
def generate_dungeon(builder, entrance_region_names, split_dungeon, world, player):
stonewalls = check_for_stonewalls(builder)
sector = generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player)
for stonewall in stonewalls:
if not stonewall_valid(stonewall):
builder.pre_open_stonewalls.add(stonewall)
return sector
def check_for_stonewalls(builder):
stonewalls = set()
for sector in builder.sectors:
for door in sector.outstanding_doors:
if door.stonewall:
stonewalls.add(door)
return stonewalls
def generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player):
if builder.valid_proposal: # we made this earlier in gen, just use it
proposed_map = builder.valid_proposal
else:
proposed_map = generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player)
builder.valid_proposal = proposed_map
queue = collections.deque(proposed_map.items())
while len(queue) > 0:
a, b = queue.popleft()
connect_doors(a, b)
queue.remove((b, a))
if len(builder.sectors) == 0:
return Sector()
available_sectors = list(builder.sectors)
master_sector = available_sectors.pop()
for sub_sector in available_sectors:
master_sector.regions.extend(sub_sector.regions)
master_sector.outstanding_doors.clear()
master_sector.r_name_set = None
return master_sector
def generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player):
logger = logging.getLogger('')
name = builder.name
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt = 1
finished = False
    # determine the paths that must stay reachable (standard-mode Hyrule Castle gets extra ones)
paths = determine_paths_for_dungeon(world, player, all_regions, name)
while not finished:
# what are my choices?
itr += 1
if itr > 1000:
if attempt > 9:
raise GenerationException('Generation taking too long. Ref %s' % name)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt += 1
logger.debug(f'Starting new attempt {attempt}')
if depth not in dungeon_cache.keys():
dungeon, hangers, hooks = gen_dungeon_info(name, builder.sectors, entrance_regions, all_regions, proposed_map,
doors_to_connect, bk_needed, bk_special, world, player)
dungeon_cache[depth] = dungeon, hangers, hooks
valid = check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
else:
dungeon, hangers, hooks = dungeon_cache[depth]
valid = True
if valid:
if len(proposed_map) == len(doors_to_connect):
if dungeon['Origin'].pinball_used:
door = world.get_door('Skull Pinball WS', player)
room = world.get_room(door.roomIndex, player)
if room.doorList[door.doorListPos][1] == DoorKind.Trap:
room.change(door.doorListPos, DoorKind.Normal)
door.trapFlag = 0x0
door.blocked = False
finished = True
continue
prev_choices = choices_master[depth]
# make a choice
hanger, hook = make_a_choice(dungeon, hangers, hooks, prev_choices, name)
if hanger is None:
backtrack = True
else:
logger.debug(' ' * depth + "%d: Linking %s to %s", depth, hanger.name, hook.name)
proposed_map[hanger] = hook
proposed_map[hook] = hanger
last_choice = (hanger, hook)
choices_master[depth].append(last_choice)
depth += 1
choices_master.append([])
else:
backtrack = True
if backtrack:
backtrack = False
choices_master.pop()
dungeon_cache.pop(depth, None)
depth -= 1
if depth < 0:
raise GenerationException('Invalid dungeon. Ref %s' % name)
a, b = choices_master[depth][-1]
logger.debug(' ' * depth + "%d: Rescinding %s, %s", depth, a.name, b.name)
proposed_map.pop(a, None)
proposed_map.pop(b, None)
return proposed_map
def determine_if_bk_needed(sector, split_dungeon, world, player):
if not split_dungeon:
for region in sector.regions:
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and door.bigKey:
return True
return False
def check_for_special(sector):
for region in sector.regions:
for loc in region.locations:
if loc.forced_big_key():
return True
return False
def gen_dungeon_info(name, available_sectors, entrance_regions, all_regions, proposed_map, valid_doors, bk_needed, bk_special, world, player):
# step 1 create dungeon: Dict<DoorName|Origin, GraphPiece>
dungeon = {}
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
group_flags, door_map = find_bk_groups(name, available_sectors, proposed_map, bk_special)
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(entrance_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
dungeon['Origin'] = create_graph_piece_from_state(None, original_state, original_state, proposed_map, exception)
either_crystal = True # if all hooks from the origin are either, explore all bits with either
for hook, crystal in dungeon['Origin'].hooks.items():
if crystal != CrystalBarrier.Either:
either_crystal = False
break
init_crystal = CrystalBarrier.Either if either_crystal else CrystalBarrier.Orange
hanger_set = set()
o_state_cache = {}
for sector in available_sectors:
for door in sector.outstanding_doors:
if door not in proposed_map.keys():
hanger_set.add(door)
bk_flag = group_flags[door_map[door]]
parent = door.entrance.parent_region
crystal_start = CrystalBarrier.Either if parent.crystal_switch else init_crystal
init_state = ExplorationState(crystal_start, dungeon=name)
init_state.big_key_special = start.big_key_special
o_state = extend_reachable_state_improved([parent], init_state, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
o_state_cache[door.name] = o_state
piece = create_graph_piece_from_state(door, o_state, o_state, proposed_map, exception)
dungeon[door.name] = piece
check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors,
group_flags, door_map, world, player, exception)
# catalog hooks: Dict<Hook, List<Door, Crystal, Door>>
# and hangers: Dict<Hang, List<Door>>
avail_hooks = defaultdict(list)
hangers = defaultdict(list)
for key, piece in dungeon.items():
door_hang = piece.hanger_info
if door_hang is not None:
hanger = hanger_from_door(door_hang)
hangers[hanger].append(door_hang)
for door, crystal in piece.hooks.items():
hook = hook_from_door(door)
avail_hooks[hook].append((door, crystal, door_hang))
    # thin out invalid hangers
winnow_hangers(hangers, avail_hooks)
return dungeon, hangers, avail_hooks
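# Hedged shape illustration of the values returned above (door names are
# invented):
#   dungeon:     {'Origin': GraphPiece, 'Some Door WS': GraphPiece, ...}
#   hangers:     {Hook.North: [hanging_door, ...], ...}
#   avail_hooks: {Hook.South: [(hook_door, CrystalBarrier.Orange, hanging_door), ...], ...}
# i.e. each hook entry records the hookable door, the crystal state it was
# reached with, and the hanging door of the piece that exposes it.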
def find_bk_groups(name, available_sectors, proposed_map, bk_special):
groups = {}
door_ids = {}
gid = 1
for sector in available_sectors:
if bk_special:
my_gid = None
for door in sector.outstanding_doors:
if door in proposed_map and proposed_map[door] in door_ids:
if my_gid:
merge_gid = door_ids[proposed_map[door]]
for door in door_ids.keys():
if door_ids[door] == merge_gid:
door_ids[door] = my_gid
groups[my_gid] = groups[my_gid] or groups[merge_gid]
else:
my_gid = door_ids[proposed_map[door]]
if not my_gid:
my_gid = gid
gid += 1
for door in sector.outstanding_doors:
door_ids[door] = my_gid
if my_gid not in groups.keys():
groups[my_gid] = False
for region in sector.regions:
for loc in region.locations:
if loc.forced_item and loc.item.bigkey and name in loc.item.name:
groups[my_gid] = True
else:
for door in sector.outstanding_doors:
door_ids[door] = gid
groups[gid] = False
return groups, door_ids
def check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors, group_flags, door_map,
world, player, exception):
not_blue = set()
not_blue.update(hanger_set)
doors_to_check = set()
doors_to_check.update(hanger_set) # doors to check, check everything on first pass
blue_hooks = []
blue_hangers = []
new_blues = True
while new_blues:
new_blues = False
for door in doors_to_check:
piece = dungeon[door.name]
for hook, crystal in piece.hooks.items():
if crystal != CrystalBarrier.Orange:
h_type = hook_from_door(hook)
if h_type not in blue_hooks:
new_blues = True
blue_hooks.append(h_type)
if piece.hanger_crystal == CrystalBarrier.Either:
h_type = hanger_from_door(piece.hanger_info)
if h_type not in blue_hangers:
new_blues = True
blue_hangers.append(h_type)
doors_to_check = set()
for door in not_blue: # am I now blue?
hang_type = hanger_from_door(door) # am I hangable on a hook?
hook_type = hook_from_door(door) # am I hookable onto a hanger?
if (hang_type in blue_hooks and not door.stonewall) or hook_type in blue_hangers:
bk_flag = group_flags[door_map[door]]
explore_blue_state(door, dungeon, o_state_cache[door.name], proposed_map, all_regions, valid_doors,
bk_flag, world, player, exception)
doors_to_check.add(door)
not_blue.difference_update(doors_to_check)
def explore_blue_state(door, dungeon, o_state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
parent = door.entrance.parent_region
blue_start = ExplorationState(CrystalBarrier.Blue, o_state.dungeon)
blue_start.big_key_special = o_state.big_key_special
b_state = extend_reachable_state_improved([parent], blue_start, proposed_map, all_regions, valid_doors, bk_flag,
world, player, exception)
dungeon[door.name] = create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception)
def make_a_choice(dungeon, hangers, avail_hooks, prev_choices, name):
# choose a hanger
all_hooks = {}
origin = dungeon['Origin']
for key in avail_hooks.keys():
for hstuff in avail_hooks[key]:
all_hooks[hstuff[0]] = None
candidate_hangers = []
for key in hangers.keys():
candidate_hangers.extend(hangers[key])
candidate_hangers.sort(key=lambda x: x.name) # sorting to create predictable seeds
random.shuffle(candidate_hangers) # randomize if equal preference
stage_2_hangers = []
if len(prev_choices) > 0:
prev_hanger = prev_choices[0][0]
if prev_hanger in candidate_hangers:
stage_2_hangers.append(prev_hanger)
candidate_hangers.remove(prev_hanger)
hookable_hangers = collections.deque()
queue = collections.deque(candidate_hangers)
while len(queue) > 0:
c_hang = queue.popleft()
if c_hang in all_hooks.keys():
hookable_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks
# todo : prefer hangers with fewer hooks at some point? not sure about this
    # this prefers hangers of the fewest type - to catch problems fast
hookable_hangers = sorted(hookable_hangers, key=lambda door: len(hangers[hanger_from_door(door)]), reverse=True)
origin_hangers = []
while len(hookable_hangers) > 0:
c_hang = hookable_hangers.pop()
if c_hang in origin.hooks.keys():
origin_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks on the 'origin'
stage_2_hangers.extend(origin_hangers)
hook = None
next_hanger = None
while hook is None:
if len(stage_2_hangers) == 0:
return None, None
next_hanger = stage_2_hangers.pop(0)
next_hanger_type = hanger_from_door(next_hanger)
hook_candidates = []
for door, crystal, orig_hang in avail_hooks[next_hanger_type]:
if filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
hook_candidates.append(door)
if len(hook_candidates) > 0:
hook_candidates.sort(key=lambda x: x.name) # sort for deterministic seeds
hook = random.choice(tuple(hook_candidates))
elif name == 'Skull Woods 2' and next_hanger.name == 'Skull Pinball WS':
continue
else:
return None, None
return next_hanger, hook
def filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
if (next_hanger, door) in prev_choices or (door, next_hanger) in prev_choices:
return False
return next_hanger != door and orig_hang != next_hanger and door not in hook_candidates
def check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player):
# evaluate if everything is still plausible
# only origin is left in the dungeon and not everything is connected
if len(dungeon.keys()) <= 1 and len(proposed_map.keys()) < len(doors_to_connect):
return False
# origin has no more hooks, but not all doors have been proposed
if not world.bigkeyshuffle[player]:
possible_bks = len(dungeon['Origin'].possible_bk_locations)
true_origin_hooks = [x for x in dungeon['Origin'].hooks.keys() if not x.bigKey or possible_bks > 0 or not bk_needed]
if len(true_origin_hooks) == 0 and len(proposed_map.keys()) < len(doors_to_connect):
return False
if len(true_origin_hooks) == 0 and bk_needed and possible_bks == 0 and len(proposed_map.keys()) == len(doors_to_connect):
return False
for key in hangers.keys():
if len(hooks[key]) > 0 and len(hangers[key]) == 0:
return False
# todo: stonewall - check that there's no hook-only that is without a matching hanger
must_hang = defaultdict(list)
all_hooks = set()
for key in hooks.keys():
for hook in hooks[key]:
all_hooks.add(hook[0])
for key in hangers.keys():
for hanger in hangers[key]:
if hanger not in all_hooks:
must_hang[key].append(hanger)
for key in must_hang.keys():
if len(must_hang[key]) > len(hooks[key]):
return False
outstanding_doors = defaultdict(list)
for d in doors_to_connect.values():
if d not in proposed_map.keys():
outstanding_doors[hook_from_door(d)].append(d)
for key in outstanding_doors.keys():
opp_key = opposite_h_type(key)
if len(outstanding_doors[key]) > 0 and len(hangers[key]) == 0 and len(hooks[opp_key]) == 0:
return False
all_visited = set()
bk_possible = not bk_needed or (world.bigkeyshuffle[player] and not bk_special)
for piece in dungeon.values():
all_visited.update(piece.visited_regions)
if not bk_possible and len(piece.possible_bk_locations) > 0:
bk_possible = True
if len(all_regions.difference(all_visited)) > 0:
return False
if not bk_possible:
return False
if not valid_paths(name, paths, entrance_regions, doors_to_connect, all_regions, proposed_map,
bk_needed, bk_special, world, player):
return False
new_hangers_found = True
accessible_hook_types = []
hanger_matching = set()
all_hangers = set()
origin_hooks = set(dungeon['Origin'].hooks.keys())
for door_hook in origin_hooks:
h_type = hook_from_door(door_hook)
if h_type not in accessible_hook_types:
accessible_hook_types.append(h_type)
while new_hangers_found:
new_hangers_found = False
for hanger_set in hangers.values():
for hanger in hanger_set:
all_hangers.add(hanger)
h_type = hanger_from_door(hanger)
if (h_type in accessible_hook_types or hanger in origin_hooks) and hanger not in hanger_matching:
new_hangers_found = True
hanger_matching.add(hanger)
matching_hooks = dungeon[hanger.name].hooks.keys()
origin_hooks.update(matching_hooks)
for door_hook in matching_hooks:
new_h_type = hook_from_door(door_hook)
if new_h_type not in accessible_hook_types:
accessible_hook_types.append(new_h_type)
return len(all_hangers.difference(hanger_matching)) == 0
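# (The loop above is a fixed-point computation: starting from the hook types
# reachable at the origin, keep marking hangers whose type has become
# accessible; the proposal stays plausible only if every hanger is eventually
# matched.)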
def valid_paths(name, paths, entrance_regions, valid_doors, all_regions, proposed_map,
bk_needed, bk_special, world, player):
for path in paths:
if type(path) is tuple:
target = path[1]
start_regions = []
for region in all_regions:
if path[0] == region.name:
start_regions.append(region)
break
else:
target = path
start_regions = entrance_regions
if not valid_path(name, start_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
return False
return True
def valid_path(name, starting_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
target_regions = set()
if type(target) is not list:
for region in all_regions:
if target == region.name:
target_regions.add(region)
break
else:
for region in all_regions:
if region.name in target:
target_regions.add(region)
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(starting_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
for exp_door in original_state.unattached_doors:
if not exp_door.door.blocked:
return True # outstanding connection possible
for target in target_regions:
if original_state.visited_at_all(target):
return True
return False # couldn't find an outstanding door or the target
def determine_required_paths(world, player):
paths = {}
for name, builder in world.dungeon_layouts[player].items():
all_regions = builder.master_sector.regions
paths[name] = determine_paths_for_dungeon(world, player, all_regions, name)
return paths
boss_path_checks = ['Eastern Boss', 'Desert Boss', 'Hera Boss', 'Tower Agahnim 1', 'PoD Boss', 'Swamp Boss',
'Skull Boss', 'Ice Boss', 'Mire Boss', 'TR Boss', 'GT Agahnim 2']
# pinball is allowed to orphan you
drop_path_checks = ['Skull Pot Circle', 'Skull Left Drop', 'Skull Back Drop', 'Sewers Rat Path']
def determine_paths_for_dungeon(world, player, all_regions, name):
all_r_names = set(x.name for x in all_regions)
paths = []
non_hole_portals = []
for portal in world.dungeon_portals[player]:
if portal.door.entrance.parent_region in all_regions:
non_hole_portals.append(portal.door.entrance.parent_region.name)
if portal.destination:
paths.append(portal.door.entrance.parent_region.name)
if world.mode[player] == 'standard' and name == 'Hyrule Castle':
paths.append('Hyrule Dungeon Cellblock')
paths.append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic'] and name == 'Thieves Town':
paths.append('Thieves Attic Window')
elif 'Thieves Attic Window' in all_r_names:
paths.append('Thieves Attic Window')
for boss in boss_path_checks:
if boss in all_r_names:
paths.append(boss)
if 'Thieves Boss' in all_r_names:
paths.append('Thieves Boss')
paths.append(('Thieves Blind\'s Cell', 'Thieves Boss'))
for drop_check in drop_path_checks:
if drop_check in all_r_names:
paths.append((drop_check, non_hole_portals))
return paths
def winnow_hangers(hangers, hooks):
removal_info = []
for hanger, door_set in hangers.items():
for door in door_set:
hook_set = hooks[hanger]
if len(hook_set) == 0:
removal_info.append((hanger, door))
else:
found_valid = False
for door_hook, crystal, orig_hanger in hook_set:
if orig_hanger != door:
found_valid = True
break
if not found_valid:
removal_info.append((hanger, door))
for hanger, door in removal_info:
hangers[hanger].remove(door)
def stonewall_valid(stonewall):
bad_door = stonewall.dest
if bad_door.blocked:
return True # great we're done with this one
loop_region = stonewall.entrance.parent_region
start_regions = [bad_door.entrance.parent_region]
if bad_door.dependents:
for dep in bad_door.dependents:
start_regions.append(dep.entrance.parent_region)
queue = deque(start_regions)
visited = set(start_regions)
while len(queue) > 0:
region = queue.popleft()
if region == loop_region:
return False # guaranteed loop
possible_entrances = list(region.entrances)
for entrance in possible_entrances:
parent = entrance.parent_region
if parent.type != RegionType.Dungeon:
return False # you can get stuck from an entrance
else:
door = entrance.door
if (door is None or (door != stonewall and not door.blocked)) and parent not in visited:
visited.add(parent)
queue.append(parent)
# we didn't find anything bad
return True
def create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception):
# todo: info about dungeon events - not sure about that
graph_piece = GraphPiece()
all_unattached = {}
for exp_d in o_state.unattached_doors:
all_unattached[exp_d.door] = exp_d.crystal
for exp_d in b_state.unattached_doors:
d = exp_d.door
if d in all_unattached.keys():
if all_unattached[d] != exp_d.crystal:
if all_unattached[d] == CrystalBarrier.Orange and exp_d.crystal == CrystalBarrier.Blue:
all_unattached[d] = CrystalBarrier.Null
elif all_unattached[d] == CrystalBarrier.Blue and exp_d.crystal == CrystalBarrier.Orange:
# the swapping case
logging.getLogger('').warning('Mismatched state @ %s (o:%s b:%s)', d.name, all_unattached[d],
exp_d.crystal)
elif all_unattached[d] == CrystalBarrier.Either:
all_unattached[d] = exp_d.crystal # pessimism, and if not this, leave it alone
else:
all_unattached[exp_d.door] = exp_d.crystal
h_crystal = door.crystal if door is not None else None
for d, crystal in all_unattached.items():
        if (door is None or d != door) and (not d.blocked or exception(d)) and d not in proposed_map.keys():
graph_piece.hooks[d] = crystal
if d == door:
h_crystal = crystal
graph_piece.hanger_info = door
graph_piece.hanger_crystal = h_crystal
graph_piece.visited_regions.update(o_state.visited_blue)
graph_piece.visited_regions.update(o_state.visited_orange)
graph_piece.visited_regions.update(b_state.visited_blue)
graph_piece.visited_regions.update(b_state.visited_orange)
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(o_state.bk_found))
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(b_state.bk_found))
graph_piece.pinball_used = o_state.pinball_used or b_state.pinball_used
return graph_piece
def filter_for_potential_bk_locations(locations):
return [x for x in locations if
'- Big Chest' not in x.name and '- Prize' not in x.name and x.name not in dungeon_events
and not x.forced_item and x.name not in ['Agahnim 1', 'Agahnim 2']]
type_map = {
Hook.Stairs: Hook.Stairs,
Hook.North: Hook.South,
Hook.South: Hook.North,
Hook.West: Hook.East,
Hook.East: Hook.West
}
def opposite_h_type(h_type) -> Hook:
return type_map[h_type]
hang_dir_map = {
Direction.North: Hook.South,
Direction.South: Hook.North,
Direction.West: Hook.East,
Direction.East: Hook.West,
}
def hanger_from_door(door):
if door.type == DoorType.SpiralStairs:
return Hook.Stairs
if door.type in [DoorType.Normal, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
return hang_dir_map[door.direction]
return None
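# Hedged illustration of the pairing convention: a hanging door is keyed by the
# opposite of its facing (hanger_from_door(north_door) == Hook.South), while
# hook_from_door (imported from BaseClasses) presumably keys a door by its own
# facing, so a north-facing hanger matches south-facing hooks and
# opposite_h_type(Hook.South) == Hook.North converts between the two views.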
def connect_doors(a, b):
# Return on unsupported types.
if a.type in [DoorType.Hole, DoorType.Warp, DoorType.Interior, DoorType.Logical]:
return
# Connect supported types
if a.type in [DoorType.Normal, DoorType.SpiralStairs, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
if a.blocked:
connect_one_way(b.entrance, a.entrance)
elif b.blocked:
connect_one_way(a.entrance, b.entrance)
else:
connect_two_way(a.entrance, b.entrance)
dep_doors, target = [], None
if len(a.dependents) > 0:
dep_doors, target = a.dependents, b
elif len(b.dependents) > 0:
dep_doors, target = b.dependents, a
if target is not None:
target_region = target.entrance.parent_region
for dep in dep_doors:
connect_simple_door(dep, target_region)
return
# If we failed to account for a type, panic
raise RuntimeError('Unknown door type ' + a.type.name)
def connect_two_way(entrance, ext):
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
ext.connect(entrance.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_one_way(entrance, ext):
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_simple_door(exit_door, region):
exit_door.entrance.connect(region)
exit_door.dest = region
special_big_key_doors = ['Hyrule Dungeon Cellblock Door', "Thieves Blind's Cell Door"]
class ExplorationState(object):
def __init__(self, init_crystal=CrystalBarrier.Orange, dungeon=None):
self.unattached_doors = []
self.avail_doors = []
self.event_doors = []
self.visited_orange = []
self.visited_blue = []
self.events = set()
self.crystal = init_crystal
# key region stuff
self.door_krs = {}
# key validation stuff
self.small_doors = []
self.big_doors = []
self.opened_doors = []
self.big_key_opened = False
self.big_key_special = False
self.found_locations = []
self.ttl_locations = 0
self.used_locations = 0
self.key_locations = 0
self.used_smalls = 0
self.bk_found = set()
self.non_door_entrances = []
self.dungeon = dungeon
self.pinball_used = False
def copy(self):
ret = ExplorationState(dungeon=self.dungeon)
ret.unattached_doors = list(self.unattached_doors)
ret.avail_doors = list(self.avail_doors)
ret.event_doors = list(self.event_doors)
ret.visited_orange = list(self.visited_orange)
ret.visited_blue = list(self.visited_blue)
ret.events = set(self.events)
ret.crystal = self.crystal
ret.door_krs = self.door_krs.copy()
ret.small_doors = list(self.small_doors)
ret.big_doors = list(self.big_doors)
ret.opened_doors = list(self.opened_doors)
ret.big_key_opened = self.big_key_opened
ret.big_key_special = self.big_key_special
ret.ttl_locations = self.ttl_locations
ret.key_locations = self.key_locations
ret.used_locations = self.used_locations
ret.used_smalls = self.used_smalls
ret.found_locations = list(self.found_locations)
ret.bk_found = set(self.bk_found)
ret.non_door_entrances = list(self.non_door_entrances)
ret.dungeon = self.dungeon
ret.pinball_used = self.pinball_used
return ret
def next_avail_door(self):
self.avail_doors.sort(key=lambda x: 0 if x.flag else 1 if x.door.bigKey else 2)
exp_door = self.avail_doors.pop()
self.crystal = exp_door.crystal
return exp_door
def visit_region(self, region, key_region=None, key_checks=False, bk_Flag=False):
if self.crystal == CrystalBarrier.Either:
if region not in self.visited_blue:
self.visited_blue.append(region)
if region not in self.visited_orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Blue:
self.visited_blue.append(region)
if region.type == RegionType.Dungeon:
for location in region.locations:
if key_checks and location not in self.found_locations:
if location.forced_item and 'Small Key' in location.item.name:
self.key_locations += 1
if location.name not in dungeon_events and '- Prize' not in location.name and location.name not in ['Agahnim 1', 'Agahnim 2']:
self.ttl_locations += 1
if location not in self.found_locations: # todo: special logic for TT Boss?
self.found_locations.append(location)
if not bk_Flag:
self.bk_found.add(location)
if location.name in dungeon_events and location.name not in self.events:
if self.flooded_key_check(location):
self.perform_event(location.name, key_region)
if location.name in flooded_keys_reverse.keys() and self.location_found(
flooded_keys_reverse[location.name]):
self.perform_event(flooded_keys_reverse[location.name], key_region)
def flooded_key_check(self, location):
if location.name not in flooded_keys.keys():
return True
return flooded_keys[location.name] in [x.name for x in self.found_locations]
def location_found(self, location_name):
for l in self.found_locations:
if l.name == location_name:
return True
return False
def perform_event(self, location_name, key_region):
self.events.add(location_name)
queue = collections.deque(self.event_doors)
while len(queue) > 0:
exp_door = queue.popleft()
if exp_door.door.req_event == location_name:
self.avail_doors.append(exp_door)
self.event_doors.remove(exp_door)
if key_region is not None:
d_name = exp_door.door.name
if d_name not in self.door_krs.keys():
self.door_krs[d_name] = key_region
def add_all_entrance_doors_check_unattached(self, region, world, player):
door_list = [x for x in get_doors(world, region, player) if x.type in [DoorType.Normal, DoorType.SpiralStairs]]
door_list.extend(get_entrance_doors(world, region, player))
for door in door_list:
if self.can_traverse(door):
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
for entrance in region.entrances:
door = world.check_for_door(entrance.name, player)
if door is None:
self.non_door_entrances.append(entrance)
def add_all_doors_check_unattached(self, region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller is not None:
door = door.controller
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def add_all_doors_check_proposed(self, region, proposed_map, valid_doors, flag, world, player, exception):
for door in get_doors(world, region, player):
if door.blocked and exception(door):
self.pinball_used = True
if self.can_traverse(door, exception):
if door.controller is not None:
door = door.controller
if door.dest is None and door not in proposed_map.keys() and door.name in valid_doors.keys():
if not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors, flag)
else:
other = self.find_door_in_list(door, self.unattached_doors)
if self.crystal != other.crystal:
other.crystal = CrystalBarrier.Either
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors, flag)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors, flag)
def add_all_doors_check_key_region(self, region, key_region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
else:
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
def add_all_doors_check_keys(self, region, key_door_proposal, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller:
door = door.controller
if door in key_door_proposal and door not in self.opened_doors:
if not self.in_door_list(door, self.small_doors):
self.append_door_to_list(door, self.small_doors)
elif (door.bigKey or door.name in special_big_key_doors) and not self.big_key_opened:
if not self.in_door_list(door, self.big_doors):
self.append_door_to_list(door, self.big_doors)
elif door.req_event is not None and door.req_event not in self.events:
if not self.in_door_list(door, self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def visited(self, region):
if self.crystal == CrystalBarrier.Either:
return region in self.visited_blue and region in self.visited_orange
elif self.crystal == CrystalBarrier.Orange:
return region in self.visited_orange
elif self.crystal == CrystalBarrier.Blue:
return region in self.visited_blue
return False
def visited_at_all(self, region):
return region in self.visited_blue or region in self.visited_orange
def found_forced_bk(self):
for location in self.found_locations:
if location.forced_big_key():
return True
return False
def can_traverse(self, door, exception=None):
if door.blocked:
return exception(door) if exception else False
if door.crystal not in [CrystalBarrier.Null, CrystalBarrier.Either]:
return self.crystal == CrystalBarrier.Either or door.crystal == self.crystal
return True
def count_locations_exclude_specials(self):
cnt = 0
for loc in self.found_locations:
if '- Big Chest' not in loc.name and '- Prize' not in loc.name and loc.name not in dungeon_events and not loc.forced_item:
cnt += 1
return cnt
def validate(self, door, region, world, player):
return self.can_traverse(door) and not self.visited(region) and valid_region_to_explore(region, self.dungeon,
world, player)
def in_door_list(self, door, door_list):
for d in door_list:
if d.door == door and d.crystal == self.crystal:
return True
return False
@staticmethod
def in_door_list_ic(door, door_list):
for d in door_list:
if d.door == door:
return True
return False
@staticmethod
def find_door_in_list(door, door_list):
for d in door_list:
if d.door == door:
return d
return None
def append_door_to_list(self, door, door_list, flag=False):
if door.crystal == CrystalBarrier.Null:
door_list.append(ExplorableDoor(door, self.crystal, flag))
else:
door_list.append(ExplorableDoor(door, door.crystal, flag))
def key_door_sort(self, d):
if d.door.smallKey:
if d.door in self.opened_doors:
return 1
else:
return 0
return 2
class ExplorableDoor(object):
def __init__(self, door, crystal, flag):
self.door = door
self.crystal = crystal
self.flag = flag
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s (%s)' % (self.door.name, self.crystal.name)
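# Breadth-first exploration over doors: starting from search_regions, the
# function below copies the state, visits regions, and works through the
# queue of traversable doors, following proposed_map where a pairing has been
# proposed and the vanilla connection otherwise. When bk_flag is set, big key
# doors are only opened once a big key could have been found (a forced
# special location for big-key-special dungeons, otherwise at least one
# countable location). Returns the resulting local state.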
def extend_reachable_state_improved(search_regions, state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
local_state = state.copy()
for region in search_regions:
local_state.visit_region(region)
local_state.add_all_doors_check_proposed(region, proposed_map, valid_doors, False, world, player, exception)
while len(local_state.avail_doors) > 0:
explorable_door = local_state.next_avail_door()
if explorable_door.door.bigKey:
if bk_flag:
big_not_found = not special_big_key_found(local_state) if local_state.big_key_special else local_state.count_locations_exclude_specials() == 0
if big_not_found:
continue # we can't open this door
if explorable_door.door in proposed_map:
connect_region = world.get_entrance(proposed_map[explorable_door.door].name, player).parent_region
else:
connect_region = world.get_entrance(explorable_door.door.name, player).connected_region
if connect_region is not None:
if valid_region_to_explore_in_regions(connect_region, all_regions, world, player) and not local_state.visited(
connect_region):
flag = explorable_door.flag or explorable_door.door.bigKey
local_state.visit_region(connect_region, bk_Flag=flag)
local_state.add_all_doors_check_proposed(connect_region, proposed_map, valid_doors, flag, world, player, exception)
return local_state
def special_big_key_found(state):
for location in state.found_locations:
if location.forced_item and location.forced_item.bigkey:
return True
return False
def valid_region_to_explore_in_regions(region, all_regions, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region in all_regions)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
# cross-utility methods
def valid_region_to_explore(region, name, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region.dungeon.name in name)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def get_doors(world, region, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def get_dungeon_doors(region, world, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and ext.parent_region.type == RegionType.Dungeon:
res.append(door)
return res
def get_entrance_doors(world, region, player):
res = []
for ext in region.entrances:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def convert_regions(region_names, world, player):
region_list = []
for name in region_names:
region_list.append(world.get_region(name, player))
return region_list
# Begin crossed mode sector shuffle
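# A DungeonBuilder accumulates the sectors assigned to one dungeon along with
# running totals (locations, key drops, dead ends, branches, connection
# supply/demand) that the assignment phases below consult to decide whether a
# candidate sector can legally join the dungeon.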
class DungeonBuilder(object):
def __init__(self, name):
self.name = name
self.sectors = []
self.location_cnt = 0
self.key_drop_cnt = 0
self.bk_required = False
self.bk_provided = False
self.c_switch_required = False
self.c_switch_present = False
self.c_locked = False
self.dead_ends = 0
self.branches = 0
self.forced_loops = 0
self.total_conn_lack = 0
self.conn_needed = defaultdict(int)
self.conn_supplied = defaultdict(int)
self.conn_balance = defaultdict(int)
self.mag_needed = {}
self.unfulfilled = defaultdict(int)
self.all_entrances = None # used for sector segregation/branching
self.entrance_list = None # used for overworld accessibility
self.layout_starts = None # used for overworld accessibility
self.master_sector = None
self.path_entrances = None # used for pathing/key doors, I think
self.split_flag = False
self.pre_open_stonewalls = set() # used by stonewall system
self.candidates = None
self.key_doors_num = None
self.combo_size = None
self.flex = 0
self.key_door_proposal = None
self.allowance = None
if 'Stonewall' in name:
self.allowance = 1
elif 'Prewall' in name:
orig_name = name[:-8]
if orig_name in dungeon_dead_end_allowance.keys():
self.allowance = dungeon_dead_end_allowance[orig_name]
if self.allowance is None:
self.allowance = 1
self.valid_proposal = None
self.split_dungeon_map = None
self.exception_list = []
def polarity_complement(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol.complement()
def polarity(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s' % self.name
def simple_dungeon_builder(name, sector_list):
define_sector_features(sector_list)
builder = DungeonBuilder(name)
dummy_pool = dict.fromkeys(sector_list)
global_pole = GlobalPolarity(dummy_pool)
for sector in sector_list:
assign_sector(sector, builder, dummy_pool, global_pole)
return builder
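# Overall flow (as implemented below): seed each dungeon with its boss and
# entrance sectors, then hand out the remaining candidate sectors in phases:
# chest locations, crystal switches, blue barriers, polarized sectors, and
# finally the neutral leftovers. Any phase may raise NeutralizingException or
# GenerationException, which restarts the whole shuffle (up to 10 attempts).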
def create_dungeon_builders(all_sectors, connections_tuple, world, player,
dungeon_entrances=None, split_dungeon_entrances=None):
logger = logging.getLogger('')
logger.info('Shuffling Dungeon Sectors')
if dungeon_entrances is None:
dungeon_entrances = default_dungeon_entrances
if split_dungeon_entrances is None:
split_dungeon_entrances = split_region_starts
define_sector_features(all_sectors)
finished, dungeon_map, attempts = False, {}, 0
while not finished:
candidate_sectors = dict.fromkeys(all_sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map = {}
for key in dungeon_regions.keys():
dungeon_map[key] = DungeonBuilder(key)
for key in dungeon_boss_sectors.keys():
current_dungeon = dungeon_map[key]
for r_name in dungeon_boss_sectors[key]:
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon, candidate_sectors, global_pole)
if key == 'Hyrule Castle' and world.mode[player] == 'standard':
for r_name in ['Hyrule Dungeon Cellblock', 'Sanctuary']: # need to deliver zelda
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon,
candidate_sectors, global_pole)
entrances_map, potentials, connections = connections_tuple
accessible_sectors, reverse_d_map = set(), {}
for key in dungeon_entrances.keys():
current_dungeon = dungeon_map[key]
current_dungeon.all_entrances = dungeon_entrances[key]
for r_name in current_dungeon.all_entrances:
sector = find_sector(r_name, candidate_sectors)
assign_sector(sector, current_dungeon, candidate_sectors, global_pole)
if r_name in entrances_map[key]:
if sector:
accessible_sectors.add(sector)
else:
if not sector:
sector = find_sector(r_name, all_sectors)
reverse_d_map[sector] = key
if world.mode[player] == 'standard':
current_dungeon = dungeon_map['Hyrule Castle']
standard_stair_check(dungeon_map, current_dungeon, candidate_sectors, global_pole)
complete_dungeons = {x: y for x, y in dungeon_map.items() if sum(len(sector.outstanding_doors) for sector in y.sectors) <= 0}
        for key in complete_dungeons:
            dungeon_map.pop(key)
# categorize sectors
identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections,
dungeon_entrances, split_dungeon_entrances)
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
if world.mode[player] == 'open' and world.shuffle[player] not in ['crossed', 'insanity']:
sanc = find_sector('Sanctuary', candidate_sectors)
            if sanc: # only run if sanc is a candidate
lw_builders = []
for name, portal_list in dungeon_portals.items():
for portal_name in portal_list:
if world.get_portal(portal_name, player).light_world:
lw_builders.append(dungeon_map[name])
break
# portals only - not drops for mirror stuff
sanc_builder = random.choice(lw_builders)
assign_sector(sanc, sanc_builder, candidate_sectors, global_pole)
free_location_sectors = {}
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.chest_locations > 0:
free_location_sectors[sector] = None
elif sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_location_sectors(dungeon_map, free_location_sectors, global_pole)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
# blue barriers
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
try:
# polarity:
if not global_pole.is_valid(dungeon_map):
# restart
                raise NeutralizingException('Either the free location or crystal switch assignment is already globally invalid')
logger.info(world.fish.translate("cli", "cli", "balance.doors"))
builder_info = dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
# the rest
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
dungeon_map.update(complete_dungeons)
finished = True
except (NeutralizingException, GenerationException) as e:
attempts += 1
logger.debug(f'Attempt {attempts} failed with {str(e)}')
if attempts >= 10:
raise Exception('Could not find a valid seed quickly, something is likely horribly wrong.', e)
return dungeon_map
def standard_stair_check(dungeon_map, dungeon, candidate_sectors, global_pole):
# this is because there must be at least one non-dead stairway in hc to get out
# this check may not be necessary
filtered_sectors = [x for x in candidate_sectors if any(y for y in x.outstanding_doors if not y.dead and y.type == DoorType.SpiralStairs)]
valid = False
while not valid:
chosen_sector = random.choice(filtered_sectors)
filtered_sectors.remove(chosen_sector)
valid = global_pole.is_valid_choice(dungeon_map, dungeon, [chosen_sector])
if valid:
assign_sector(chosen_sector, dungeon, candidate_sectors, global_pole)
def identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections, dungeon_entrances, split_dungeon_entrances):
accessible_overworld, found_connections, explored = set(), set(), False
while not explored:
explored = True
for ent_name, region in connections.items():
if ent_name in found_connections:
continue
sector = find_sector(ent_name, reverse_d_map.keys())
if sector in accessible_sectors:
found_connections.add(ent_name)
accessible_overworld.add(region) # todo: drops don't give ow access
explored = False
elif region in accessible_overworld:
found_connections.add(ent_name)
accessible_sectors.add(sector)
explored = False
else:
d_name = reverse_d_map[sector]
if d_name not in split_dungeon_entrances:
for r_name in dungeon_entrances[d_name]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
elif d_name in split_dungeon_entrances.keys():
split_section = None
for split_name, split_list in split_dungeon_entrances[d_name].items():
if ent_name in split_list:
split_section = split_name
break
if split_section:
for r_name in split_dungeon_entrances[d_name][split_section]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
# todo: split version that adds allowance for potential entrances
def calc_allowance_and_dead_ends(builder, connections_tuple, world, player):
portals = world.dungeon_portals[player]
entrances_map, potentials, connections = connections_tuple
name = builder.name if not builder.split_flag else builder.name.rsplit(' ', 1)[0]
needed_connections = [x for x in builder.all_entrances if x not in entrances_map[name]]
starting_allowance = 0
used_sectors = set()
destination_entrances = [x.door.entrance.parent_region.name for x in portals if x.destination]
dead_ends = [x.door.entrance.parent_region.name for x in portals if x.deadEnd]
for entrance in entrances_map[name]:
sector = find_sector(entrance, builder.sectors)
if sector:
outflow_target = 0 if entrance not in drop_entrances_allowance else 1
if sector not in used_sectors and (sector.adj_outflow() > outflow_target or entrance in dead_ends):
if entrance not in destination_entrances:
starting_allowance += 1
else:
builder.branches -= 1
used_sectors.add(sector)
elif sector not in used_sectors:
if entrance in destination_entrances and sector.branches() > 0:
builder.branches -= 1
if entrance not in drop_entrances_allowance:
needed_connections.append(entrance)
builder.allowance = starting_allowance
for entrance in needed_connections:
sector = find_sector(entrance, builder.sectors)
if sector and sector not in used_sectors: # ignore things on same sector
is_destination = entrance in destination_entrances
connect_able = False
if entrance in connections.keys():
enabling_region = connections[entrance]
check_list = list(potentials[enabling_region])
if enabling_region.name in ['Desert Ledge', 'Desert Palace Entrance (North) Spot']:
alternate = 'Desert Palace Entrance (North) Spot' if enabling_region.name == 'Desert Ledge' else 'Desert Ledge'
if world.get_region(alternate, player) in potentials:
check_list.extend(potentials[world.get_region(alternate, player)])
connecting_entrances = [x for x in check_list if x != entrance and x not in dead_entrances and x not in drop_entrances_allowance]
connect_able = len(connecting_entrances) > 0
            if is_destination and sector.branches() == 0:
builder.dead_ends += 1
if is_destination and sector.branches() > 0:
builder.branches -= 1
if connect_able and not is_destination:
builder.allowance += 1
used_sectors.add(sector)
def define_sector_features(sectors):
for sector in sectors:
for region in sector.regions:
for loc in region.locations:
if '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']:
pass
elif loc.forced_item and 'Small Key' in loc.item.name:
sector.key_only_locations += 1
elif loc.forced_item and loc.forced_item.bigkey:
sector.bk_provided = True
elif loc.name not in dungeon_events and not loc.forced_item:
sector.chest_locations += 1
if '- Big Chest' in loc.name or loc.name in ["Hyrule Castle - Zelda's Chest",
"Thieves' Town - Blind's Cell"]:
sector.bk_required = True
for ext in region.exits:
door = ext.door
if door is not None:
if door.crystal == CrystalBarrier.Either:
sector.c_switch = True
elif door.crystal == CrystalBarrier.Orange:
sector.orange_barrier = True
elif door.crystal == CrystalBarrier.Blue:
sector.blue_barrier = True
if door.bigKey:
sector.bk_required = True
def assign_sector(sector, dungeon, candidate_sectors, global_pole):
if sector:
del candidate_sectors[sector]
global_pole.consume(sector)
assign_sector_helper(sector, dungeon)
def assign_sector_helper(sector, builder):
builder.sectors.append(sector)
builder.location_cnt += sector.chest_locations
builder.key_drop_cnt += sector.key_only_locations
if sector.c_switch:
builder.c_switch_present = True
if sector.blue_barrier:
builder.c_switch_required = True
if sector.bk_required:
builder.bk_required = True
if sector.bk_provided:
builder.bk_provided = True
count_conn_needed_supplied(sector, builder.conn_needed, builder.conn_supplied)
builder.dead_ends += sector.dead_ends()
builder.branches += sector.branches()
if sector in builder.exception_list:
builder.exception_list.remove(sector)
else:
if builder.split_dungeon_map:
builder.split_dungeon_map = None
if builder.valid_proposal:
builder.valid_proposal = None
def count_conn_needed_supplied(sector, conn_needed, conn_supplied):
for door in sector.outstanding_doors:
# todo: destination sectors like skull 2 west should be
if (door.blocked or door.dead or sector.adj_outflow() <= 1) and not sector.is_entrance_sector():
conn_needed[hook_from_door(door)] += 1
# todo: stonewall
else: # todo: dungeons that need connections... skull, tr, hc, desert (when edges are done)
conn_supplied[hanger_from_door(door)] += 1
def find_sector(r_name, sectors):
for s in sectors:
if r_name in s.region_set():
return s
return None
def assign_location_sectors(dungeon_map, free_location_sectors, global_pole):
valid = False
choices = None
sector_list = list(free_location_sectors)
random.shuffle(sector_list)
while not valid:
choices, d_idx, totals = weighted_random_locations(dungeon_map, sector_list)
for i, sector in enumerate(sector_list):
choice = d_idx[choices[i].name]
totals[choice] += sector.chest_locations
valid = True
for d_name, idx in d_idx.items():
if totals[idx] < 5: # min locations for dungeons is 5 (bk exception)
valid = False
break
for i, choice in enumerate(choices):
builder = dungeon_map[choice.name]
assign_sector(sector_list[i], builder, free_location_sectors, global_pole)
def weighted_random_locations(dungeon_map, free_location_sectors):
population = []
ttl_assigned = 0
weights = []
totals = []
d_idx = {}
for i, dungeon_builder in enumerate(dungeon_map.values()):
population.append(dungeon_builder)
totals.append(dungeon_builder.location_cnt)
ttl_assigned += dungeon_builder.location_cnt
weights.append(6.375)
d_idx[dungeon_builder.name] = i
average = ttl_assigned / 13
for i, db in enumerate(population):
if db.location_cnt < average:
weights[i] += average - db.location_cnt
if db.location_cnt > average:
weights[i] = max(0, weights[i] - db.location_cnt + average)
choices = random.choices(population, weights, k=len(free_location_sectors))
return choices, d_idx, totals
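# Weighting sketch: every dungeon starts at 6.375 and is pulled toward the
# average location count over the 13 dungeons. E.g. with an average of 10, a
# dungeon already holding 4 locations gets weight 6.375 + (10 - 4) = 12.375,
# while one holding 14 drops to max(0, 6.375 - 14 + 10) = 2.375, so
# under-stocked dungeons are more likely to receive the next sector.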
def assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole, assign_one=False):
population = []
some_c_switches_present = False
for name, builder in dungeon_map.items():
if builder.c_switch_required and not builder.c_switch_present and not builder.c_locked:
population.append(name)
if builder.c_switch_present and not builder.c_locked:
some_c_switches_present = True
if len(population) == 0: # nothing needs a switch
if assign_one and not some_c_switches_present: # something should have one
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign. Ref %s' % next(iter(dungeon_map.keys())))
valid, builder_choice, switch_choice = False, None, None
switch_candidates = list(crystal_switches)
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = [name for name, builder in dungeon_map.items() if not builder.c_locked]
while not valid:
if len(builder_candidates) == 0:
if len(switch_candidates) == 0:
                        raise GenerationException('Nowhere to assign crystal switch. Ref %s' % next(iter(dungeon_map.keys())))
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = list(dungeon_map.keys())
choice = random.choice(builder_candidates)
builder_candidates.remove(choice)
builder_choice = dungeon_map[choice]
test_set = [switch_choice]
test_set.extend(crystal_barriers)
valid = global_pole.is_valid_choice(dungeon_map, builder_choice, test_set)
assign_sector(switch_choice, builder_choice, crystal_switches, global_pole)
return crystal_switches
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign')
sector_list = list(crystal_switches)
if len(population) > len(sector_list):
raise GenerationException('Not enough crystal switch sectors for those needed')
choices = random.sample(sector_list, k=len(population))
for i, choice in enumerate(choices):
builder = dungeon_map[population[i]]
assign_sector(choice, builder, crystal_switches, global_pole)
return crystal_switches
def ensure_crystal_switches_reachable(dungeon_map, crystal_switches, polarized_sectors, crystal_barriers, global_pole):
invalid_builders = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and builder.c_switch_required and not builder.c_locked:
invalid_builders.append(builder)
while len(invalid_builders) > 0:
valid_builders = []
for builder in invalid_builders:
entrance_sectors = []
reachable_crystals = defaultdict()
for sector in builder.sectors:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
if sector.is_entrance_sector() and not sector.destination_entrance:
need_switch = True
for region in sector.get_start_regions():
if region.crystal_switch:
need_switch = False
break
any_benefit = False
for eq in sector.equations:
if len(eq.benefit) > 0:
any_benefit = True
break
if need_switch and any_benefit:
entrance_sectors.append(sector)
for eq in sector.equations:
if eq.c_switch:
reachable_crystals[hook_from_door(eq.door)] = True
valid_ent_sectors = []
for entrance_sector in entrance_sectors:
other_sectors = [x for x in builder.sectors if x != entrance_sector]
reachable, access = is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors)
if reachable:
valid_ent_sectors.append(entrance_sector)
else:
candidates = {}
for c in find_pol_cand_for_c_switch(access, reachable_crystals, polarized_sectors):
candidates[c] = 'Polarized'
for c in find_crystal_cand(access, crystal_switches):
candidates[c] = 'Crystals'
for c in find_pol_cand_for_c_switch(access, reachable_crystals, crystal_barriers):
candidates[c] = 'Barriers'
valid, sector, which_list = False, None, None
while not valid:
if len(candidates) <= 0:
raise GenerationException(f'need to provide more sophisticated crystal connection for {entrance_sector}')
sector, which_list = random.choice(list(candidates.items()))
del candidates[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
if which_list == 'Polarized':
assign_sector(sector, builder, polarized_sectors, global_pole)
elif which_list == 'Crystals':
assign_sector(sector, builder, crystal_switches, global_pole)
elif which_list == 'Barriers':
assign_sector(sector, builder, crystal_barriers, global_pole)
entrance_sectors = [x for x in entrance_sectors if x not in valid_ent_sectors]
if len(entrance_sectors) == 0:
valid_builders.append(builder)
invalid_builders = [x for x in invalid_builders if x not in valid_builders]
def is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors):
current_access = {}
for eq in entrance_sector.equations:
if eq.total_cost() <= 0:
for key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
current_access[key] = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
changed = True
while changed:
changed = False
for sector in other_sectors:
for eq in sector.equations:
key, cost_door = eq.cost
if key in current_access.keys() and current_access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
block_dict = eq.crystal_blocked
if door not in block_dict.keys() or block_dict[door] != CrystalBarrier.Blue:
if bene_key not in current_access.keys():
current_access[bene_key] = True
changed = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
return False, current_access
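# The loop above is a fixed-point computation: door "hook" types reachable at
# zero cost from the entrance sector seed the access set, the other sectors'
# equations extend it, and iteration stops once a pass adds nothing new. The
# switch counts as reachable as soon as some accessible hook faces a hook
# type known to reach a crystal switch.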
def find_pol_cand_for_c_switch(access, reachable_crystals, polarized_candidates):
candidates = []
for sector in polarized_candidates:
if pol_cand_matches_access_reach(sector, access, reachable_crystals):
candidates.append(sector)
return candidates
def pol_cand_matches_access_reach(sector, access, reachable_crystals):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
if opposite_h_type(bene_key) in reachable_crystals.keys():
return True
return False
def find_crystal_cand(access, crystal_switches):
candidates = []
for sector in crystal_switches:
if crystal_cand_matches_access(sector, access):
candidates.append(sector)
return candidates
def crystal_cand_matches_access(sector, access):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key] and eq.c_switch and len(sector.outstanding_doors) > 1:
return True
return False
def assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole):
population = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and not builder.c_locked:
population.append(name)
sector_list = list(crystal_barriers)
random.shuffle(sector_list)
choices = random.choices(population, k=len(sector_list))
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
assign_sector(sector_list[i], builder, crystal_barriers, global_pole)
def identify_polarity_issues(dungeon_map):
unconnected_builders = {}
for name, builder in dungeon_map.items():
identify_polarity_issues_internal(name, builder, unconnected_builders)
return unconnected_builders
def identify_polarity_issues_internal(name, builder, unconnected_builders):
if len(builder.sectors) == 1:
return
else:
def sector_filter(x, y):
return x != y
# else:
# def sector_filter(x, y):
# return x != y and (x.outflow() > 1 or is_entrance_sector(builder, x))
connection_flags = {}
for slot in PolSlot:
connection_flags[slot] = {}
for slot2 in PolSlot:
connection_flags[slot][slot2] = False
for sector in builder.sectors:
others = [x for x in builder.sectors if sector_filter(x, sector)]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
check_flags(sector_mag, connection_flags)
unconnected_sector = True
for i in PolSlot:
if sector_mag[i.value] == 0 or other_mag[i.value] > 0 or self_connecting(sector, i, sector_mag):
unconnected_sector = False
break
if unconnected_sector:
for i in PolSlot:
if sector_mag[i.value] > 0 and other_mag[i.value] == 0 and not self_connecting(sector, i, sector_mag):
builder.mag_needed[i] = [x for x in PolSlot if other_mag[x.value] > 0]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
ttl_mag = sum_magnitude(builder.sectors)
for slot in PolSlot:
for slot2 in PolSlot:
if ttl_mag[slot.value] > 0 and ttl_mag[slot2.value] > 0 and not connection_flags[slot][slot2]:
builder.mag_needed[slot] = [slot2]
builder.mag_needed[slot2] = [slot]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
def self_connecting(sector, slot, magnitude):
return sector.polarity()[slot.value] == 0 and sum(magnitude) > magnitude[slot.value]
def check_flags(sector_mag, connection_flags):
for slot in PolSlot:
for slot2 in PolSlot:
if sector_mag[slot.value] > 0 and sector_mag[slot2.value] > 0:
connection_flags[slot][slot2] = True
if slot != slot2:
for check_slot in PolSlot: # transitivity check
if check_slot not in [slot, slot2] and connection_flags[slot2][check_slot]:
connection_flags[slot][check_slot] = True
connection_flags[check_slot][slot] = True
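# check_flags records which polarity slots co-occur within a single sector and
# closes that relation transitively, so identify_polarity_issues can tell when
# two slot types could never be linked by any chain of sectors in the builder.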
def identify_simple_branching_issues(dungeon_map):
problem_builders = {}
for name, builder in dungeon_map.items():
if name == 'Skull Woods 2': # i dislike this special case todo: identify destination entrances
builder.conn_supplied[Hook.West] += 1
builder.conn_needed[Hook.East] -= 1
builder.forced_loops = calc_forced_loops(builder.sectors)
if builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance:
problem_builders[name] = builder
for h_type in Hook:
lack = builder.conn_balance[h_type] = builder.conn_supplied[h_type] - builder.conn_needed[h_type]
if lack < 0:
builder.total_conn_lack += -lack
problem_builders[name] = builder
return problem_builders
def calc_forced_loops(sector_list):
forced_loops = 0
for sector in sector_list:
h_mag = sector.hook_magnitude()
other_sectors = [x for x in sector_list if x != sector]
other_mag = sum_hook_magnitude(other_sectors)
loop_parts = 0
for hook in Hook:
opp = opposite_h_type(hook).value
if h_mag[hook.value] > other_mag[opp] and loop_present(hook, opp, h_mag, other_mag):
loop_parts += (h_mag[hook.value] - other_mag[opp]) / 2
forced_loops += math.floor(loop_parts)
return forced_loops
def loop_present(hook, opp, h_mag, other_mag):
if hook == Hook.Stairs:
return h_mag[hook.value] - other_mag[opp] >= 2
else:
return h_mag[opp] >= h_mag[hook.value] - other_mag[opp]
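# Rough idea behind calc_forced_loops: when a sector exposes more hooks of one
# type than the rest of the dungeon can answer with the opposite type, the
# surplus can only pair up with itself, and every such internal pairing
# consumes two hooks, hence the halving before the floor.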
def is_satisfied(door_dict_list):
for door_dict in door_dict_list:
for door_list in door_dict.values():
if len(door_list) > 0:
return False
return True
# todo: maybe filter by used doors too
# todo: I want the number of doors that match to still be accessible
def filter_match_deps(candidate, match_deps):
return [x for x in match_deps if x != candidate]
def sum_magnitude(sector_list):
result = [0] * len(PolSlot)
for sector in sector_list:
vector = sector.magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_hook_magnitude(sector_list):
result = [0] * len(Hook)
for sector in sector_list:
vector = sector.hook_magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_polarity(sector_list):
pol = Polarity()
for sector in sector_list:
pol += sector.polarity()
return pol
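# Polarity primer (inferred from usage in this module): each outstanding door
# contributes +1 or -1 on one axis of the Polarity vector (one axis per
# PolSlot member), so opposite-facing doors cancel out. A finished dungeon
# must sum to neutral, i.e. every hook has a matching hanger on each axis.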
def assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info):
# step 1: fix polarity connection issues
unconnected_builders = identify_polarity_issues(dungeon_map)
while len(unconnected_builders) > 0:
for name, builder in unconnected_builders.items():
candidates = find_connection_candidates(builder.mag_needed, polarized_sectors)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Cannot find a candidate for connectedness. %s' % name)
sector = random.choice(candidates)
candidates.remove(sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.mag_needed = {}
unconnected_builders = identify_polarity_issues(unconnected_builders)
# step 2: fix dead ends
problem_builders = identify_simple_branching_issues(dungeon_map)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates, charges = find_simple_branching_candidates(builder, polarized_sectors)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
valid, choice = False, None
while not valid:
if len(best_candidates) == 0:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Simple branch problems: %s' % name)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
choice = random.choice(best_candidates)
best_candidates.remove(choice)
i = candidates.index(choice)
candidates.pop(i)
charges.pop(i)
valid = global_pole.is_valid_choice(dungeon_map, builder, [choice]) and valid_connected_assignment(builder, [choice])
assign_sector(choice, builder, polarized_sectors, global_pole)
builder.total_conn_lack = 0
builder.conn_balance.clear()
problem_builders = identify_simple_branching_issues(problem_builders)
# step 3: fix neutrality issues
polarity_step_3(dungeon_map, polarized_sectors, global_pole)
# step 4: fix dead ends again
neutral_choices: List[List] = neutralize_the_rest(polarized_sectors)
problem_builders = identify_branching_issues(dungeon_map, builder_info)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates = find_branching_candidates(builder, neutral_choices, builder_info)
valid, choice = False, None
while not valid:
if len(candidates) <= 0:
raise GenerationException('Cross Dungeon Builder: Complex branch problems: %s' % name)
choice = random.choice(candidates)
candidates.remove(choice)
valid = global_pole.is_valid_choice(dungeon_map, builder, choice) and valid_polarized_assignment(builder, choice)
neutral_choices.remove(choice)
for sector in choice:
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.unfulfilled.clear()
problem_builders = identify_branching_issues(problem_builders, builder_info)
# step 5: assign randomly until gone - must maintain connectedness, neutral polarity, branching, lack, etc.
comb_w_replace = len(dungeon_map) ** len(neutral_choices)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_choices)))
random.shuffle(combinations)
tries = 0
while len(polarized_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_choices))
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].extend(neutral_choices[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
for sector in neutral_choices[i]:
assign_sector(sector, builder, polarized_sectors, global_pole)
tries += 1
def polarity_step_3(dungeon_map, polarized_sectors, global_pole):
# step 3a: fix odd builders
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
grouped_choices: List[List] = find_forced_groupings(polarized_sectors, dungeon_map)
random.shuffle(odd_builders)
odd_candidates = find_odd_sectors(grouped_choices)
tries = 0
while len(odd_builders) > 0:
if tries > 1000:
raise GenerationException('Unable to fix dungeon parity. Ref: %s' % next(iter(odd_builders)).name)
best_choices = None
best_charge = sum([x.polarity().charge() for x in dungeon_map.values()])
samples = 0
combos = ncr(len(odd_candidates), len(odd_builders))
sample_target = 100 if combos > 10 else combos * 2
while best_choices is None or samples < sample_target:
samples += 1
if len(odd_candidates) < len(odd_builders):
raise GenerationException(f'Unable to fix dungeon parity - not enough candidates.'
f' Ref: {next(iter(odd_builders)).name}')
choices = random.sample(odd_candidates, k=len(odd_builders))
valid = global_pole.is_valid_multi_choice(dungeon_map, odd_builders, choices)
charge = calc_total_charge(dungeon_map, odd_builders, choices)
if valid and charge < best_charge:
best_choices = choices
best_charge = charge
if samples > sample_target and best_choices is None:
best_choices = choices
best_charge = charge
samples = 0
all_valid = True
for i, candidate_list in enumerate(best_choices):
test_set = find_forced_connections(dungeon_map, candidate_list, polarized_sectors)
builder = odd_builders[i]
if ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
all_valid &= valid_branch_only(builder, candidate_list)
else:
all_valid = False
break
if not all_valid:
break
if all_valid:
for i, candidate_list in enumerate(best_choices):
builder = odd_builders[i]
for sector in candidate_list:
assign_sector(sector, builder, polarized_sectors, global_pole)
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
else:
tries += 1
# step 3b: neutralize all builders
parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole)
def parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole):
start = time.process_time()
builders = list(dungeon_map.values())
finished = all([x.polarity().is_neutral() for x in builders])
solution_list, current_depth = defaultdict(list), 1
complete_builders = [x for x in builders if x.polarity().is_neutral()]
avail_sectors = list(polarized_sectors)
while not finished:
builders_to_check = [x for x in builders if not (x.polarity()+sum_polarity(solution_list[x])).is_neutral()]
candidates, last_depth = find_exact_neutralizing_candidates_parallel_db(builders_to_check, solution_list,
avail_sectors, current_depth)
increment_depth = True
any_valid = False
for builder, candidate_list in candidates.items():
valid, sectors = False, None
while not valid:
if len(candidate_list) == 0:
                    increment_depth = False  # need to look again at the current level
break
sectors = random.choice(candidate_list)
candidate_list.remove(sectors)
proposal = solution_list.copy()
proposal[builder] = list(proposal[builder])
proposal[builder].extend(sectors)
valid = global_pole.is_valid_multi_choice_2(dungeon_map, builders, proposal)
if valid:
any_valid = True
solution_list[builder].extend(sectors)
for sector in sectors:
avail_sectors.remove(sector)
complete_builders.append(builder)
for other_builder, other_cand_list in candidates.items():
if other_builder not in complete_builders:
candidates_to_remove = list()
for candidate in other_cand_list:
for sector in sectors:
if sector in candidate:
candidates_to_remove.append(candidate)
break
other_cand_list[:] = [x for x in other_cand_list if x not in candidates_to_remove]
# remove sectors from other candidate lists
if not any_valid:
increment_depth = True
current_depth = last_depth + 1 if increment_depth else last_depth
finished = all([(x.polarity()+sum_polarity(solution_list[x])).is_neutral() for x in builders])
logging.getLogger('').info(f'-Balanced solution found in {time.process_time()-start}')
for builder, sectors in solution_list.items():
for sector in sectors:
assign_sector(sector, builder, polarized_sectors, global_pole)
def find_forced_connections(dungeon_map, candidate_list, polarized_sectors):
test_set = list(candidate_list)
other_sectors = [x for x in polarized_sectors if x not in candidate_list]
dungeon_hooks = defaultdict(int)
for name, builder in dungeon_map.items():
d_mag = sum_hook_magnitude(builder.sectors)
for val in Hook:
dungeon_hooks[val] += d_mag[val.value]
queue = deque(candidate_list)
while queue:
candidate = queue.popleft()
c_mag = candidate.hook_magnitude()
other_candidates = [x for x in candidate_list if x != candidate]
for val in Hook:
if c_mag[val.value] > 0:
opp = opposite_h_type(val)
o_val = opp.value
if sum_hook_magnitude(other_candidates)[o_val] == 0 and dungeon_hooks[opp] == 0 and not valid_self(c_mag, val, opp):
forced_sector = []
for sec in other_sectors:
if sec.hook_magnitude()[o_val] > 0:
forced_sector.append(sec)
if len(forced_sector) > 1:
break
if len(forced_sector) == 1:
test_set.append(forced_sector[0])
return test_set
def valid_self(c_mag, val, opp):
if val == Hook.Stairs:
return c_mag[val.value] > 2
else:
return c_mag[opp.value] > 0 and sum(c_mag) > 2
def ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
test_copy = list(test_set)
while not valid_connected_assignment(builder, test_copy):
dummy_builder = DungeonBuilder("Dummy Builder for " + builder.name)
dummy_builder.sectors = builder.sectors + test_copy
possibles = [x for x in polarized_sectors if x not in test_copy]
candidates = find_connected_candidates(possibles)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
return False
sector = random.choice(candidates)
candidates.remove(sector)
t2 = test_copy+[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, t2) and valid_branch_only(builder, t2)
test_copy.append(sector)
dummy_builder.sectors = builder.sectors + test_copy
test_set[:] = test_copy
return True
def calc_total_charge(dungeon_map, builders, sector_lists):
polarity_list = [x.polarity() for x in dungeon_map.values() if x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
polarity = builder.polarity() + sum_polarity(sectors)
polarity_list.append(polarity)
return sum([x.charge() for x in polarity_list])
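# GlobalPolarity tracks, for the whole remaining candidate pool, how much
# positive and negative charge is still available on each slot (plus the
# even/odd parity counts). The assignment phases consult it so that a locally
# fine choice cannot strand the pool in a state where some dungeon can no
# longer be neutralized.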
class GlobalPolarity:
def __init__(self, candidate_sectors):
self.positives = [0, 0, 0]
self.negatives = [0, 0, 0]
self.evens = 0
self.odds = 0
for sector in candidate_sectors:
pol = sector.polarity()
if pol.charge() % 2 == 0:
self.evens += 1
else:
self.odds += 1
for slot in PolSlot:
if pol.vector[slot.value] < 0:
self.negatives[slot.value] += -pol.vector[slot.value]
elif pol.vector[slot.value] > 0:
self.positives[slot.value] += pol.vector[slot.value]
def copy(self):
gp = GlobalPolarity([])
gp.positives = self.positives.copy()
gp.negatives = self.negatives.copy()
gp.evens = self.evens
gp.odds = self.odds
return gp
def is_valid(self, dungeon_map):
polarities = [x.polarity() for x in dungeon_map.values()]
return self._check_parity(polarities) and self._is_valid_polarities(polarities)
def _check_parity(self, polarities):
local_evens = 0
local_odds = 0
for pol in polarities:
if pol.charge() % 2 == 0:
local_evens += 1
else:
local_odds += 1
if local_odds > self.odds:
return False
return True
def _is_valid_polarities(self, polarities):
positives = self.positives.copy()
negatives = self.negatives.copy()
for polarity in polarities:
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if negatives[slot.value] >= polarity[slot.value]:
negatives[slot.value] -= polarity[slot.value]
else:
return False
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if positives[slot.value] >= -polarity[slot.value]:
positives[slot.value] += polarity[slot.value]
else:
return False
elif slot == PolSlot.Stairs:
if positives[slot.value] >= polarity[slot.value]:
positives[slot.value] -= polarity[slot.value]
else:
return False
return True
def consume(self, sector):
polarity = sector.polarity()
if polarity.charge() % 2 == 0:
self.evens -= 1
else:
self.odds -= 1
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if self.negatives[slot.value] >= -polarity[slot.value]:
self.negatives[slot.value] += polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif slot == PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
def is_valid_choice(self, dungeon_map, builder, sectors):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral() and x != builder]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice(self, dungeon_map, builders, sector_lists):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice_2(self, dungeon_map, builders, sector_dict):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for builder, sectors in sector_dict.items():
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
# def check_odd_polarities(self, candidate_sectors, dungeon_map):
# odd_candidates = [x for x in candidate_sectors if x.polarity().charge() % 2 != 0]
# odd_map = {n: x for (n, x) in dungeon_map.items() if sum_polarity(x.sectors).charge() % 2 != 0}
# gp = GlobalPolarity(odd_candidates)
# return gp.is_valid(odd_map)
def find_connection_candidates(mag_needed, sector_pool):
candidates = []
for sector in sector_pool:
if sector.branching_factor() < 2:
continue
mag = sector.magnitude()
matches = False
for slot, match_slot in mag_needed.items():
if mag[slot.value] > 0:
for i in PolSlot:
if i in match_slot and mag[i.value] > 0:
matches = True
break
if matches:
candidates.append(sector)
return candidates
def find_simple_branching_candidates(builder, sector_pool):
candidates = defaultdict(list)
charges = defaultdict(list)
outflow_needed = builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance
total_needed = builder.dead_ends + builder.forced_loops * 2 - builder.branches + builder.allowance
original_lack = builder.total_conn_lack
best_lack = original_lack
for sector in sector_pool:
if outflow_needed and sector.branching_factor() <= 2:
continue
calc_sector_balance(sector)
ttl_lack = 0
for hook in Hook:
lack = builder.conn_balance[hook] + sector.conn_balance[hook]
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + [sector])
net_outflow = builder.dead_ends + forced_loops * 2 + sector.dead_ends() - builder.branches - builder.allowance - sector.branches()
valid_branches = net_outflow < total_needed
if valid_branches and (ttl_lack < original_lack or original_lack >= 0):
candidates[ttl_lack].append(sector)
charges[ttl_lack].append((builder.polarity()+sector.polarity()).charge())
if ttl_lack < best_lack:
best_lack = ttl_lack
if best_lack == original_lack and not outflow_needed:
raise GenerationException('These candidates may not help at all')
if len(candidates[best_lack]) <= 0:
raise GenerationException('Nothing can fix the simple branching issue. Panic ensues.')
return candidates[best_lack], charges[best_lack]
def calc_sector_balance(sector): # todo: move to base class?
if sector.conn_balance is None:
sector.conn_balance = defaultdict(int)
for door in sector.outstanding_doors:
if door.blocked or door.dead or sector.branching_factor() <= 1:
sector.conn_balance[hook_from_door(door)] -= 1
else:
sector.conn_balance[hanger_from_door(door)] += 1
def find_odd_sectors(grouped_candidates):
return [x for x in grouped_candidates if sum_polarity(x).charge() % 2 != 0]
# This is related to the perfect sum problem in CS
# * Best algorithm so far - db for dynamic programming
# * Keeps track of unique deviations from neutral in the index
# * Another assumption is that solutions that take fewer sector are more ideal
# * When attempting to add depth and there are no more possibilities, this raises an Exception
# * Each depth should be checked before asking for another one
# An alternative approach would be to trim the db after deciding the candidate at the current depth will be
# part of the proposal
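# Shape of the table, roughly: db[depth] maps a Polarity to the set of
# depth-sized sector combinations achieving it, e.g.
#   db = {0: {Polarity(): {OrderedFrozenSet(): None}},
#         1: {<pol of s1>: {{s1}: None}, <pol of s2>: {{s2}: None}, ...}}
# A builder is solvable at the current depth when the complement of its
# polarity appears as a key, and the stored combinations are exactly the
# candidate sector sets that would neutralize it.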
def find_exact_neutralizing_candidates_parallel_db(builders, proposal, avail_sectors, current_depth):
candidate_map = defaultdict(list)
polarity_map = {}
for builder in builders:
polarity_map[builder] = builder.polarity() + sum_polarity(proposal[builder])
finished = False
db, index = create_db_for_depth(current_depth, avail_sectors)
while not finished:
depth_map = db[current_depth]
for builder in builders:
target = polarity_map[builder].complement()
if target in depth_map.keys():
finished = True
candidate_map[builder].extend(depth_map[target].keys())
if finished:
for builder in list(candidate_map.keys()):
try:
candidate_map[builder] = weed_candidates(builder, {0: candidate_map[builder]}, 0)
except NeutralizingException:
del candidate_map[builder]
if len(candidate_map) == 0:
finished = False
if not finished:
current_depth += 1
add_depth_to_db(db, index, current_depth, avail_sectors)
return candidate_map, current_depth
def create_db_for_depth(depth, avail_sectors):
db = {0: {Polarity(): {OrderedFrozenSet(): None}}}
db_index = {Polarity()}
for i in range(1, depth+1):
add_depth_to_db(db, db_index, i, avail_sectors)
return db, db_index
def add_depth_to_db(db, db_index, i, avail_sectors):
previous = db[i-1]
depth_map = defaultdict(dict)
index_additions = set()
for sector in avail_sectors:
sector_set = {sector}
sector_pol = sector.polarity()
for polarity, choices in previous.items():
combo_pol = sector_pol + polarity
if combo_pol not in db_index:
index_additions.add(combo_pol)
for choice in choices:
if sector in choice.frozen_set:
continue
new_set = choice.new_with_element(sector_set)
depth_map[combo_pol][new_set] = None
for addition in index_additions:
if len(depth_map[addition]) > 0:
db_index.add(addition)
else:
del depth_map[addition]
if len(depth_map) == 0:
raise NeutralizingException('There is not a solution for this particular combination. Crystal switch issue?') # restart required
db[i] = depth_map
class OrderedFrozenSet:
def __init__(self):
self.frozen_set = frozenset()
self.order = []
def __eq__(self, other):
return self.frozen_set == other.frozen_set
def __hash__(self):
return hash(self.frozen_set)
def __iter__(self):
return self.order.__iter__()
def __len__(self):
return len(self.order)
def new_with_element(self, elements):
ret = OrderedFrozenSet()
ret.frozen_set = frozenset(self.frozen_set | elements)
ret.order = list(self.order)
ret.order.extend(elements)
return ret
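# OrderedFrozenSet pairs a frozenset (hashable, so duplicate combinations
# collapse when used as dict keys in the db above) with an insertion-order
# list, which presumably keeps iteration deterministic for a given seed.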
# this could be re-worked for the more complete solution
# i'm not sure it does a whole lot now
def weed_candidates(builder, candidates, best_charge):
official_cand = []
while len(official_cand) == 0:
if len(candidates.keys()) == 0:
raise NeutralizingException('Cross Dungeon Builder: Weeded out all candidates %s' % builder.name)
while best_charge not in candidates.keys():
best_charge += 1
candidate_list = candidates.pop(best_charge)
best_lack = None
for cand in candidate_list:
ttl_deads = 0
ttl_branches = 0
for sector in cand:
calc_sector_balance(sector)
ttl_deads += sector.dead_ends()
ttl_branches += sector.branches()
ttl_lack = 0
ttl_balance = 0
for hook in Hook:
bal = 0
for sector in cand:
bal += sector.conn_balance[hook]
lack = builder.conn_balance[hook] + bal
ttl_balance += lack
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + list(cand))
if ttl_balance >= 0 and builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance:
if best_lack is None or ttl_lack < best_lack:
best_lack = ttl_lack
official_cand = [cand]
elif ttl_lack == best_lack:
official_cand.append(cand)
# choose from among those that use less
best_len = None
cand_len = []
for cand in official_cand:
size = len(cand)
if best_len is None or size < best_len:
best_len = size
cand_len = [cand]
elif size == best_len:
cand_len.append(cand)
return cand_len
def find_branching_candidates(builder, neutral_choices, builder_info):
candidates = []
for choice in neutral_choices:
resolved, problem_list = check_for_valid_layout(builder, choice, builder_info)
if resolved:
candidates.append(choice)
return candidates
def find_connected_candidates(sector_pool):
candidates = []
for sector in sector_pool:
if sector.adj_outflow() >= 2:
candidates.append(sector)
return candidates
def neutralize_the_rest(sector_pool):
neutral_choices = []
main_pool = list(sector_pool)
failed_pool = []
r_size = 1
while len(main_pool) > 0 or len(failed_pool) > 0:
if len(main_pool) <= r_size:
main_pool.extend(failed_pool)
failed_pool.clear()
r_size += 1
candidate = random.choice(main_pool)
main_pool.remove(candidate)
if r_size > len(main_pool):
raise GenerationException("Cross Dungeon Builder: no more neutral pairings possible")
combinations = ncr(len(main_pool), r_size)
itr = 0
done = False
while not done:
ttl_polarity = candidate.polarity()
choice_set = kth_combination(itr, main_pool, r_size)
for choice in choice_set:
ttl_polarity += choice.polarity()
if ttl_polarity.is_neutral():
choice_set.append(candidate)
neutral_choices.append(choice_set)
main_pool = [x for x in main_pool if x not in choice_set]
failed_pool = [x for x in failed_pool if x not in choice_set]
done = True
else:
itr += 1
if itr >= combinations:
failed_pool.append(candidate)
done = True
return neutral_choices
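# neutralize_the_rest greedily pairs each leftover polarized sector with the
# smallest companion set (size r_size, enumerated lazily via kth_combination)
# whose combined polarity is neutral, growing r_size only after every
# remaining sector has failed at the current size.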
# doesn't force a grouping when all in the found_list comes from the same sector
def find_forced_groupings(sector_pool, dungeon_map):
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = categorize_groupings(builder.sectors)
groupings = []
queue = deque(sector_pool)
skips = set()
while len(queue) > 0:
grouping = queue.popleft()
        is_list = isinstance(grouping, list)
if not is_list and grouping in skips:
continue
grouping = grouping if is_list else [grouping]
hook_categories = categorize_groupings(grouping)
force_found = False
for val in Hook:
if val in hook_categories.keys():
required_doors, flexible_doors = hook_categories[val]
if len(required_doors) >= 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and len(hook_categories[opp][1]) > 0:
found_list.extend(hook_categories[opp][1])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and len(hooks[opp][1]) > 0:
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and len(other_sector_cats[opp][1]) > 0:
found_list.extend(other_sector_cats[opp][1])
if len(required_doors) == len(found_list):
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and len(cats[opp][1]) > 0:
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping = list(set(grouping).union(set(merge)))
groupings.remove(merge)
queue.append(grouping)
force_found = True
elif len(flexible_doors) == 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and (len(hook_categories[opp][0]) > 0 or len(hook_categories[opp][1]) > 0):
found_list.extend(hook_categories[opp][0])
found_list.extend([x for x in hook_categories[opp][1] if x not in flexible_doors])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and (len(hooks[opp][0]) > 0 or len(hooks[opp][1]) > 0):
found_list.extend(hooks[opp][0])
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and (len(other_sector_cats[opp][0]) > 0 or len(other_sector_cats[opp][1]) > 0):
found_list.extend(other_sector_cats[opp][0])
found_list.extend(other_sector_cats[opp][1])
if len(found_list) == 1:
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and (len(cats[opp][0]) > 0 or len(cats[opp][1]) > 0):
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping += merge
groupings.remove(merge)
queue.append(grouping)
force_found = True
if force_found:
break
if not force_found:
groupings.append(grouping)
return groupings
def categorize_groupings(sectors):
hook_categories = {}
for sector in sectors:
for door in sector.outstanding_doors:
hook = hook_from_door(door)
if hook not in hook_categories.keys():
hook_categories[hook] = ([], [])
if door.blocked or door.dead:
hook_categories[hook][0].append(door)
else:
hook_categories[hook][1].append(door)
return hook_categories
def valid_assignment(builder, sector_list, builder_info):
if not valid_entrance(builder, sector_list, builder_info):
return False
if not valid_c_switch(builder, sector_list):
return False
if not valid_polarized_assignment(builder, sector_list):
return False
resolved, problems = check_for_valid_layout(builder, sector_list, builder_info)
return resolved
def valid_entrance(builder, sector_list, builder_info):
is_dead_end = False
if len(builder.sectors) == 0:
is_dead_end = True
else:
entrances, splits, c_tuple, world, player = builder_info
if builder.name not in entrances.keys():
name_parts = builder.name.rsplit(' ', 1)
entrance_list = splits[name_parts[0]][name_parts[1]]
entrances = []
for sector in builder.sectors:
if sector.is_entrance_sector():
sector.region_set()
entrances.append(sector)
all_dead = True
for sector in entrances:
for region in entrance_list:
if region in sector.region_set():
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == region), None)
if portal and not portal.deadEnd:
all_dead = False
break
if not all_dead:
break
is_dead_end = all_dead
return len(sector_list) == 0 if is_dead_end else True
def valid_c_switch(builder, sector_list):
if builder.c_switch_present:
return True
for sector in sector_list:
if sector.c_switch:
return True
if builder.c_switch_required:
return False
for sector in sector_list:
if sector.blue_barrier:
return False
return True
def valid_connected_assignment(builder, sector_list):
full_list = sector_list + builder.sectors
if len(full_list) == 1 and sum_magnitude(full_list) == [0, 0, 0]:
return True
for sector in full_list:
if sector.is_entrance_sector():
continue
others = [x for x in full_list if x != sector]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
hookable = False
for i in range(len(sector_mag)):
if sector_mag[i] > 0 and other_mag[i] > 0:
hookable = True
if not hookable:
return False
return True
def valid_branch_assignment(builder, sector_list):
if not valid_connected_assignment(builder, sector_list):
return False
return valid_branch_only(builder, sector_list)
def valid_branch_only(builder, sector_list):
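    """Branch accounting: dead ends (plus two per forced loop) may not exceed
    the available branches plus the builder's allowance."""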
forced_loops = calc_forced_loops(builder.sectors + sector_list)
ttl_deads = 0
ttl_branches = 0
for s in sector_list:
        # calc_sector_balance(s)  # do I want to check lack here? see weed_candidates
ttl_deads += s.dead_ends()
ttl_branches += s.branches()
return builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance
def valid_polarized_assignment(builder, sector_list):
if not valid_branch_assignment(builder, sector_list):
return False
return (sum_polarity(sector_list) + sum_polarity(builder.sectors)).is_neutral()
def assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info):
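    """Distribute the remaining neutral sectors among the builders. Small search
    spaces (at most 1000 assignments with replacement) are enumerated in shuffled
    order; larger ones are sampled randomly, giving up after 1000 tries."""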
comb_w_replace = len(dungeon_map) ** len(neutral_sectors)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_sectors)))
random.shuffle(combinations)
tries = 0
while len(neutral_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found for "neutral" sectors. Ref: %s' % next(iter(dungeon_map.keys())))
# sector_list = list(neutral_sectors)
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_sectors))
neutral_sector_list = list(neutral_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(neutral_sector_list[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, neutral_sectors, global_pole)
tries += 1
def split_dungeon_builder(builder, split_list, builder_info):
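    """Split a multi-entrance builder into one sub-builder per entrance group.
    Retries up to 5 times per merge level; when attempts run out, entrance
    groups are merged together (merge_attempt) before giving up entirely."""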
if builder.split_dungeon_map and len(builder.exception_list) == 0:
for name, proposal in builder.valid_proposal.items():
builder.split_dungeon_map[name].valid_proposal = proposal
return builder.split_dungeon_map # we made this earlier in gen, just use it
attempts, comb_w_replace, merge_attempt, merge_limit = 0, None, 0, len(split_list) - 1
while attempts < 5: # does not solve coin flips 3% of the time
try:
candidate_sectors = dict.fromkeys(builder.sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map, sub_builder, merge_keys = {}, None, []
if merge_attempt > 0:
candidates = []
for name, split_entrances in split_list.items():
if len(split_entrances) > 1:
candidates.append(name)
continue
elif len(split_entrances) <= 0:
continue
ents, splits, c_tuple, world, player = builder_info
r_name = split_entrances[0]
p = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == r_name), None)
if p and not p.deadEnd:
candidates.append(name)
merge_keys = random.sample(candidates, merge_attempt+1) if len(candidates) >= merge_attempt+1 else []
for name, split_entrances in split_list.items():
key = builder.name + ' ' + name
if merge_keys and name in merge_keys:
other_keys = [builder.name + ' ' + x for x in merge_keys if x != name]
other_key = next((x for x in other_keys if x in dungeon_map), None)
if other_key:
key = other_key
sub_builder = dungeon_map[other_key]
sub_builder.all_entrances.extend(split_entrances)
if key not in dungeon_map:
dungeon_map[key] = sub_builder = DungeonBuilder(key)
sub_builder.split_flag = True
sub_builder.all_entrances = list(split_entrances)
for r_name in split_entrances:
assign_sector(find_sector(r_name, candidate_sectors), sub_builder, candidate_sectors, global_pole)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
return balance_split(candidate_sectors, dungeon_map, global_pole, builder_info)
except (GenerationException, NeutralizingException):
if comb_w_replace and comb_w_replace <= 10000:
attempts += 5 # all the combinations were tried already, no use repeating
else:
attempts += 1
if attempts >= 5 and merge_attempt < merge_limit:
merge_attempt, attempts = merge_attempt + 1, 0
raise GenerationException('Unable to resolve in 5 attempts')
def balance_split(candidate_sectors, dungeon_map, global_pole, builder_info):
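    """Assign candidate sectors to the split sub-builders. Small search spaces
    (at most 10000 assignments with replacement) are brute-forced; otherwise the
    assignment proceeds in stages: forced dead ends, forced assignments, forced
    crystal requirements, crystal switches/barriers, polarized sectors, then the
    neutral remainder."""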
dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player = builder_info
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
if comb_w_replace <= 10000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(candidate_sectors)))
random.shuffle(combinations)
tries = 0
while tries < len(combinations):
choices = combinations[tries]
main_sector_list = list(candidate_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(main_sector_list[i])
all_valid = True
for name, builder in dungeon_map.items():
if not valid_assignment(builder, chosen_sectors[name], builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, candidate_sectors, global_pole)
return dungeon_map
tries += 1
raise GenerationException('Split Dungeon Builder: Impossible dungeon. Ref %s' % next(iter(dungeon_map.keys())))
# categorize sectors
check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole)
check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole)
check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole)
crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors = categorize_sectors(candidate_sectors)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers,
global_pole, len(crystal_barriers) > 0)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
# blue barriers
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
# polarity:
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
# the rest
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
return dungeon_map
def check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole):
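    """When the dead-end doors of some hook type exactly consume the supply of
    opposite hooks, pre-assign a compatible dead end to each two-branch entrance
    sector offering such a hook, and lock that builder's crystal state."""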
dead_end_sectors = [x for x in candidate_sectors if x.branching_factor() <= 1]
other_sectors = [x for x in candidate_sectors if x not in dead_end_sectors]
for name, builder in dungeon_map.items():
other_sectors += builder.sectors
other_magnitude = sum_hook_magnitude(other_sectors)
dead_cnt = [0] * len(Hook)
for sector in dead_end_sectors:
hook_mag = sector.hook_magnitude()
for hook in Hook:
if hook_mag[hook.value] != 0:
dead_cnt[hook.value] += 1
for hook in Hook:
opp = opposite_h_type(hook).value
if dead_cnt[hook.value] > other_magnitude[opp]:
raise GenerationException('Impossible to satisfy all these dead ends')
elif dead_cnt[hook.value] == other_magnitude[opp]:
candidates = [x for x in dead_end_sectors if x.hook_magnitude()[hook.value] > 0]
for sector in other_sectors:
if sector.hook_magnitude()[opp] > 0 and sector.is_entrance_sector() and sector.branching_factor() == 2:
builder = None
for b in dungeon_map.values():
if sector in b.sectors:
builder = b
break
valid, candidate_sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Split Dungeon Builder: Bad dead end %s' % builder.name)
candidate_sector = random.choice(candidates)
candidates.remove(candidate_sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [candidate_sector]) and check_crystal(candidate_sector, sector)
assign_sector(candidate_sector, builder, candidate_sectors, global_pole)
builder.c_locked = True
def check_crystal(dead_end, entrance):
if dead_end.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
if entrance.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
return True
def check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole):
done = False
while not done:
done = True
magnitude = sum_hook_magnitude(candidate_sectors)
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = sum_hook_magnitude(builder.sectors)
for val in Hook:
if magnitude[val.value] == 1:
forced_sector = None
for sec in candidate_sectors:
if sec.hook_magnitude()[val.value] > 0:
forced_sector = sec
break
opp = opposite_h_type(val).value
other_sectors = [x for x in candidate_sectors if x != forced_sector]
if sum_hook_magnitude(other_sectors)[opp] == 0:
found_hooks = []
for name, hooks in dungeon_hooks.items():
if hooks[opp] > 0 and not dungeon_map[name].c_locked:
found_hooks.append(name)
if len(found_hooks) == 1:
done = False
assign_sector(forced_sector, dungeon_map[found_hooks[0]], candidate_sectors, global_pole)
def check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole):
for name, builder in dungeon_map.items():
if check_for_forced_crystal_single(builder, candidate_sectors):
builder.c_switch_required = True
def check_for_forced_crystal_single(builder, candidate_sectors):
builder_doors = defaultdict(dict)
for sector in builder.sectors:
for door in sector.outstanding_doors:
builder_doors[hook_from_door(door)][door] = sector
if len(builder_doors) == 0:
return False
candidate_doors = defaultdict(dict)
for sector in candidate_sectors:
for door in sector.outstanding_doors:
candidate_doors[hook_from_door(door)][door] = sector
for hook in builder_doors.keys():
for door in builder_doors[hook].keys():
opp = opposite_h_type(hook)
if opp in builder_doors.keys():
for d, sector in builder_doors[opp].items():
if d != door and (not sector.blue_barrier or sector.c_switch):
return False
for d, sector in candidate_doors[opp].items():
if not sector.blue_barrier or sector.c_switch:
return False
return True
def categorize_sectors(candidate_sectors):
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
return crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors
class NeutralizingException(Exception):
pass
class GenerationException(Exception):
pass
class DoorEquation:
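    """Cost/benefit model for one outstanding door: `cost` is the (hook type,
    door) that must be spent to connect it, and `benefit` maps hook types to the
    doors that become reachable once the connection is made."""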
def __init__(self, door):
self.door = door
self.cost = None, None
self.benefit = defaultdict(list)
self.required = False
self.access_id = None
self.c_switch = False
self.crystal_blocked = {}
self.entrance_flag = False
def copy(self):
eq = DoorEquation(self.door)
eq.cost = self.cost
for key, doors in self.benefit.items():
eq.benefit[key] = doors.copy()
eq.required = self.required
eq.c_switch = self.c_switch
eq.crystal_blocked = self.crystal_blocked.copy()
return eq
def total_cost(self):
return 0 if self.cost[0] is None else 1
def gross(self, current_access):
key, cost_door = self.cost
if key is None:
# todo: could just be Orange as well (multiple entrance case)
            crystal_access = current_access.door_access[None]
else:
crystal_access = None
for match_door, crystal in current_access.outstanding_doors.items():
if hook_from_door(match_door) == key:
if crystal_access is None or current_access._better_crystal(crystal_access, crystal):
crystal_access = crystal
ttl = 0
for key, door_list in self.benefit.items():
for door in door_list:
if door in current_access.outstanding_doors.keys() or door in current_access.proposed_connections.keys():
continue
if door in self.crystal_blocked.keys() and not self.c_switch:
if crystal_access == CrystalBarrier.Either or crystal_access == self.crystal_blocked[door]:
ttl += 1
else:
ttl += 1
return ttl
def profit(self, current_access):
return self.gross(current_access) - self.total_cost()
def neutral(self):
key, door = self.cost
if key is not None and len(self.benefit[key]) <= 0:
return False
return True
def neutral_profit(self):
key, door = self.cost
if key is not None:
if len(self.benefit[key]) < 1:
return False
if len(self.benefit[key]) > 1:
return True
return False
else:
return True
def can_cover_cost(self, current_access):
key, door = self.cost
if key is not None and current_access[key] < 1:
return False
return True
class DungeonAccess:
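    """Running state while equations are resolved: per-hook availability
    (`access`), the best known crystal state at each door (`door_access`),
    outstanding and crystal-blocked doors, and proposed door pairings."""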
def __init__(self):
self.access = defaultdict(int)
self.door_access = {} # door -> crystal
self.door_sector_map = {} # door -> original sector
self.outstanding_doors = {}
self.blocked_doors = {}
self.door_access[None] = CrystalBarrier.Orange
self.proposed_connections = {}
self.reached_doors = set()
def can_cover_equation(self, equation):
key, door = equation.cost
if key is None:
return True
return self.access[key] >= 1
def can_pay(self, key):
if key is None:
return True
return self.access[key] >= 1
def adjust_for_equation(self, equation, sector):
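        """Pay the equation's cost against a matching outstanding door, record
        the proposed pairing, bank the benefits, and propagate CrystalBarrier.Either
        access through already-proposed connections via BFS."""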
if equation.cost[0] is None:
original_crystal = self.door_access[None]
for key, door_list in equation.benefit.items():
self.access[key] += len(door_list)
for door in door_list:
# I can't think of an entrance sector that forces blue
crystal_state = CrystalBarrier.Either if equation.c_switch else original_crystal
if crystal_state == CrystalBarrier.Either:
self.door_access[None] = CrystalBarrier.Either
self.door_access[door] = crystal_state
self.door_sector_map[door] = sector
self.outstanding_doors[door] = crystal_state
self.reached_doors.add(door)
else:
key, door = equation.cost
self.access[key] -= 1
            # find a matching connection
best_door, best_crystal = None, None
for match_door, crystal in self.outstanding_doors.items():
if hook_from_door(match_door) == key:
if best_door is None or self._better_crystal(best_crystal, crystal):
best_door = match_door
best_crystal = crystal
if best_door is None:
                raise Exception('No outstanding door matches hook %s to pay for %s' % (key, door.name))
# for match_door, crystal in self.blocked_doors.items():
# if hook_from_door(match_door) == key:
# if best_door is None or self._better_crystal(best_crystal, crystal):
# best_door = match_door
# best_crystal = crystal
self.door_sector_map[door] = sector
self.door_access[door] = best_crystal
self.reached_doors.add(door)
self.proposed_connections[door] = best_door
self.proposed_connections[best_door] = door
if best_door in self.outstanding_doors.keys():
del self.outstanding_doors[best_door]
elif best_door in self.blocked_doors.keys():
del self.blocked_doors[best_door]
self.reached_doors.add(best_door)
# todo: backpropagate crystal access
if equation.c_switch or best_crystal == CrystalBarrier.Either:
# if not equation.door.blocked:
self.door_access[door] = CrystalBarrier.Either
self.door_access[best_door] = CrystalBarrier.Either
queue = deque([best_door, door])
visited = set()
while len(queue) > 0:
next_door = queue.popleft()
visited.add(next_door)
curr_sector = self.door_sector_map[next_door]
next_eq = None
for eq in curr_sector.equations:
if eq.door == next_door:
next_eq = eq
break
if next_eq.entrance_flag:
crystal_state = self.door_access[next_door]
self.door_access[None] = crystal_state
for eq in curr_sector.equations:
cand_door = eq.door
crystal_state = self.door_access[None]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
if partner_door not in visited:
queue.append(partner_door)
else:
for key, door_list in next_eq.benefit.items():
for cand_door in door_list:
crystal_state = self.door_access[next_door]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door in self.blocked_doors.keys():
needed_crystal = self.blocked_doors[cand_door]
                                    if meets_crystal_requirement(crystal_state, needed_crystal):
del self.blocked_doors[cand_door]
if cand_door != door:
self.access[key] += 1
self.outstanding_doors[cand_door] = crystal_state
self.door_access[cand_door] = crystal_state
self.reached_doors.add(cand_door)
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
queue.append(cand_door)
queue.append(partner_door)
for key, door_list in equation.benefit.items():
for door in door_list:
crystal_access = self.door_access[best_door]
can_access = True
if door in equation.crystal_blocked.keys():
if crystal_access == CrystalBarrier.Either or crystal_access == equation.crystal_blocked[door]:
crystal_access = equation.crystal_blocked[door]
else:
self.blocked_doors[door] = equation.crystal_blocked[door]
can_access = False
self.door_sector_map[door] = sector
if can_access and door not in self.reached_doors:
self.access[key] += 1
self.door_access[door] = crystal_access
self.outstanding_doors[door] = crystal_access
self.reached_doors.add(door)
def _better_crystal(self, current_champ, contender):
if current_champ == CrystalBarrier.Either:
return False
elif contender == CrystalBarrier.Either:
return True
elif current_champ == CrystalBarrier.Blue:
return False
elif contender == CrystalBarrier.Blue:
return True
else:
return False
def identify_branching_issues(dungeon_map, builder_info):
unconnected_builders = {}
for name, builder in dungeon_map.items():
resolved, unreached_doors = check_for_valid_layout(builder, [], builder_info)
if not resolved:
unconnected_builders[name] = builder
for hook, door_list in unreached_doors.items():
builder.unfulfilled[hook] += len(door_list)
return unconnected_builders
def check_for_valid_layout(builder, sector_list, builder_info):
dungeon_entrances, split_dungeon_entrances, c_tuple, world, player = builder_info
if builder.name in split_dungeon_entrances.keys():
try:
temp_builder = DungeonBuilder(builder.name)
for s in sector_list + builder.sectors:
assign_sector_helper(s, temp_builder)
split_list = split_dungeon_entrances[builder.name]
builder.split_dungeon_map = split_dungeon_builder(temp_builder, split_list, builder_info)
builder.valid_proposal = {}
possible_regions = set()
for portal in world.dungeon_portals[player]:
if not portal.destination and portal.name in dungeon_portals[builder.name]:
possible_regions.add(portal.door.entrance.parent_region.name)
if builder.name in dungeon_drops.keys():
possible_regions.update(dungeon_drops[builder.name])
for name, split_build in builder.split_dungeon_map.items():
name_bits = name.split(" ")
orig_name = " ".join(name_bits[:-1])
entrance_regions = split_dungeon_entrances[orig_name][name_bits[-1]]
# todo: this is hardcoded information for random entrances
for sector in split_build.sectors:
match_set = set(sector.region_set()).intersection(possible_regions)
if len(match_set) > 0:
for r_name in match_set:
if r_name not in entrance_regions:
entrance_regions.append(r_name)
# entrance_regions = [x for x in entrance_regions if x not in split_check_entrance_invalid]
proposal = generate_dungeon_find_proposal(split_build, entrance_regions, True, world, player)
# record split proposals
builder.valid_proposal[name] = proposal
builder.exception_list = list(sector_list)
return True, {}
except (GenerationException, NeutralizingException):
builder.split_dungeon_map = None
builder.valid_proposal = None
unreached_doors = resolve_equations(builder, sector_list)
return False, unreached_doors
else:
unreached_doors = resolve_equations(builder, sector_list)
return len(unreached_doors) == 0, unreached_doors
def resolve_equations(builder, sector_list):
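    """Simulate connecting every outstanding door via its equations. Returns a
    dict of hooks that could not be satisfied; empty means the doors can
    plausibly all be paired off (leftover stairs pair up mod 2 and opposite
    N/S and E/W hooks cancel)."""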
unreached_doors = defaultdict(list)
equations = {x: y for x, y in copy_door_equations(builder, sector_list).items() if len(y) > 0}
current_access = {}
    sector_split = {}  # sectors that belong to a certain split
if builder.name in split_region_starts.keys():
for name, region_list in split_region_starts[builder.name].items():
current_access[name] = DungeonAccess()
for r_name in region_list:
sector = find_sector(r_name, builder.sectors)
sector_split[sector] = name
else:
current_access[builder.name] = DungeonAccess()
# resolve all that provide more access
free_sector, eq_list, free_eq = find_free_equation(equations)
while free_eq is not None:
if free_sector in sector_split.keys():
access_id = sector_split[free_sector]
access = current_access[access_id]
else:
access_id = next(iter(current_access.keys()))
access = current_access[access_id]
resolve_equation(free_eq, eq_list, free_sector, access_id, access, equations)
free_sector, eq_list, free_eq = find_free_equation(equations)
while len(equations) > 0:
valid_access = next_access(current_access)
eq, eq_list, sector, access, access_id = None, None, None, None, None
if len(valid_access) == 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_priority_equation(equations, access_id, access)
elif len(valid_access) > 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_greedy_equation(equations, access_id, access, sector_split)
if eq:
resolve_equation(eq, eq_list, sector, access_id, access, equations)
else:
for sector, eq_list in equations.items():
for eq in eq_list:
unreached_doors[hook_from_door(eq.door)].append(eq.door)
return unreached_doors
valid_access = next_access(current_access)
for access_id, dungeon_access in valid_access:
access = dungeon_access.access
access[Hook.Stairs] = access[Hook.Stairs] % 2
ns_leftover = min(access[Hook.North], access[Hook.South])
access[Hook.North] -= ns_leftover
access[Hook.South] -= ns_leftover
ew_leftover = min(access[Hook.West], access[Hook.East])
access[Hook.East] -= ew_leftover
access[Hook.West] -= ew_leftover
if sum(access.values()) > 0:
for hook, num in access.items():
for i in range(num):
unreached_doors[hook].append('placeholder')
return unreached_doors
def next_access(current_access):
valid_ones = [(x, y) for x, y in current_access.items() if sum(y.access.values()) > 0]
valid_ones.sort(key=lambda x: sum(x[1].access.values()))
return valid_ones
# Priority order for choosing the next equation:
# 1. equations with no change to access (check)
# 2. the highest-benefit equations that can be paid for (check)
# 3. 0-benefit required transforms
# 4. 0-benefit transforms (how to pick between these?)
# 5. negative-benefit transforms (dead ends)
def find_priority_equation(equations, access_id, current_access):
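    """Choose the next equation by successive tie-breaker filters: affordable
    candidates with positive gross, requirement-safe ones, neutral ones, best
    profit, required flags, crystal switches, loop enablers, flex affordability,
    local optimality, enabling a wanted equation, and finally the cost hook with
    the most access to spare."""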
flex = calc_flex(equations, current_access)
required = calc_required(equations, current_access)
wanted_candidates = []
best_profit = None
all_candidates = []
local_profit_map = {}
for sector, eq_list in equations.items():
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
# if eq.neutral_profit() or eq.neutral():
# return eq, eq_list, sector # don't need to compare - just use it now
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
all_candidates.append((eq, eq_list, sector))
elif (best_profit is None or profit >= best_profit) and profit > 0:
if best_profit is None or profit > best_profit:
wanted_candidates = [eq]
best_profit = profit
else:
wanted_candidates.append(eq)
local_profit_map[sector] = best_local_profit
filtered_candidates = filter_requirements(all_candidates, equations, required, current_access)
filtered_candidates = [x for x in filtered_candidates if x[0].gross(current_access) > 0]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates # probably bad things
if len(filtered_candidates) == 0:
return None, None, None # can't pay for anything
if len(filtered_candidates) == 1:
return filtered_candidates[0]
neutral_candidates = [x for x in filtered_candidates if (x[0].neutral_profit() or x[0].neutral()) and x[0].profit(current_access) == local_profit_map[x[2]]]
if len(neutral_candidates) == 0:
neutral_candidates = filtered_candidates
if len(neutral_candidates) == 1:
return neutral_candidates[0]
filtered_candidates = filter_requirements(neutral_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = neutral_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
best_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if best_profit is None or profit >= best_profit:
if best_profit is None or profit > best_profit:
triplet_candidates = [(eq, eq_list, sector)]
best_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
filtered_candidates = filter_requirements(triplet_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = triplet_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
required_candidates = [x for x in filtered_candidates if x[0].required]
if len(required_candidates) == 0:
required_candidates = filtered_candidates
if len(required_candidates) == 1:
return required_candidates[0]
c_switch_candidates = [x for x in required_candidates if x[0].c_switch]
if len(c_switch_candidates) == 0:
c_switch_candidates = required_candidates
if len(c_switch_candidates) == 1:
return c_switch_candidates[0]
loop_candidates = find_enabling_switch_connections(current_access)
if len(loop_candidates) >= 1:
return loop_candidates[0] # just pick one
flexible_candidates = [x for x in c_switch_candidates if x[0].can_cover_cost(flex)]
if len(flexible_candidates) == 0:
flexible_candidates = c_switch_candidates
if len(flexible_candidates) == 1:
return flexible_candidates[0]
good_local_candidates = [x for x in flexible_candidates if local_profit_map[x[2]] == x[0].profit(current_access)]
if len(good_local_candidates) == 0:
good_local_candidates = flexible_candidates
if len(good_local_candidates) == 1:
return good_local_candidates[0]
leads_to_profit = [x for x in good_local_candidates if can_enable_wanted(x[0], wanted_candidates)]
if len(leads_to_profit) == 0:
leads_to_profit = good_local_candidates
if len(leads_to_profit) == 1:
return leads_to_profit[0]
cost_point = {x[0]: find_cost_point(x, current_access) for x in leads_to_profit}
best_point = max(cost_point.values())
cost_point_candidates = [x for x in leads_to_profit if cost_point[x[0]] == best_point]
if len(cost_point_candidates) == 0:
cost_point_candidates = leads_to_profit
return cost_point_candidates[0] # just pick one I guess
def find_enabling_switch_connections(current_access):
triad_list = []
# probably should check for loop/branches in builder at some stage
# - but this could indicate that a loop or branch is necessary
for cand_door, crystal in current_access.outstanding_doors.items():
for blocked_door, req_crystal in current_access.blocked_doors.items():
if hook_from_door(cand_door) == hanger_from_door(blocked_door):
if crystal == CrystalBarrier.Either or crystal == req_crystal:
sector, equation = current_access.door_sector_map[blocked_door], None
for eq in sector.equations:
if eq.door == blocked_door:
equation = eq.copy()
break
if equation:
triad_list.append((equation, [equation], sector))
return triad_list
def find_cost_point(eq_triplet, access):
cost_point = 0
key, cost_door = eq_triplet[0].cost
if cost_door is not None:
cost_point += access.access[key] - 1
return cost_point
def find_greedy_equation(equations, access_id, current_access, sector_split):
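    """Selection used when several access regions are live: restrict to the
    matching split, drop candidates whose profit lags far behind the sector's
    outstanding doors, then take the lowest-profit remaining candidate."""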
all_candidates = []
for sector, eq_list in equations.items():
if sector not in sector_split.keys() or sector_split[sector] == access_id:
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
for eq in eq_list:
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
all_candidates.append((eq, eq_list, sector))
if len(all_candidates) == 0:
return None, None, None # can't pay for anything
if len(all_candidates) == 1:
return all_candidates[0]
filtered_candidates = [x for x in all_candidates if x[0].profit(current_access) + 2 >= len(x[2].outstanding_doors)]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates # terrible! ugly dead ends
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
worst_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if worst_profit is None or profit <= worst_profit:
if worst_profit is None or profit < worst_profit:
triplet_candidates = [(eq, eq_list, sector)]
worst_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
if len(triplet_candidates) == 0:
triplet_candidates = filtered_candidates # probably bad things
return triplet_candidates[0] # just pick one?
def calc_required(equations, current_access):
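    """When the sum of current access and best local profits is zero, prune
    strictly inferior equations; a sector reduced to one equation has it marked
    required. Returns per-hook tallies of required costs and required benefits."""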
ttl = sum(current_access.access.values())
local_profit_map = {}
for sector, eq_list in equations.items():
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
local_profit_map[sector] = best_local_profit
ttl += best_local_profit
if ttl == 0:
new_lists = {}
for sector, eq_list in equations.items():
if len(eq_list) > 1:
rem_list = []
for eq in eq_list:
if eq.profit(current_access) < local_profit_map[sector]:
rem_list.append(eq)
if len(rem_list) > 0:
new_lists[sector] = [x for x in eq_list if x not in rem_list]
for sector, eq_list in new_lists.items():
if len(eq_list) <= 1:
for eq in eq_list:
eq.required = True
equations[sector] = eq_list
required_costs = defaultdict(int)
required_benefits = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key, door_list in eq.benefit.items():
required_benefits[key] += len(door_list)
return required_costs, required_benefits
def calc_flex(equations, current_access):
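    """Per-hook access left over after reserving for required equation costs."""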
flex_spending = defaultdict(int)
required_costs = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key in Hook:
flex_spending[key] = max(0, current_access.access[key]-required_costs[key])
return flex_spending
def filter_requirements(triplet_candidates, equations, required, current_access):
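    """Keep only candidates that, once taken, would still leave enough potential
    benefit to cover the required costs and enough potential costs to cover the
    required exits."""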
r_costs, r_exits = required
valid_candidates = []
for cand, cand_list, cand_sector in triplet_candidates:
valid = True
if not cand.required and not cand.c_switch:
potential_benefit = defaultdict(int)
benefit_counted = set()
potential_costs = defaultdict(int)
for h_type, benefit in current_access.access.items():
cur_cost = 1 if cand.cost[0] is not None else 0
if benefit - cur_cost > 0:
potential_benefit[h_type] += benefit - cur_cost
for h_type, benefit_list in cand.benefit.items():
potential_benefit[h_type] += len(benefit_list)
for sector, eq_list in equations.items():
if sector == cand_sector:
affected_doors = [d for x in cand.benefit.values() for d in x] + [cand.cost[1]]
adj_list = [x for x in eq_list if x.door not in affected_doors]
else:
adj_list = eq_list
for eq in adj_list:
for h_type, benefit_list in eq.benefit.items():
total_benefit = set(benefit_list) - benefit_counted
potential_benefit[h_type] += len(total_benefit)
benefit_counted.update(benefit_list)
h_type, cost_door = eq.cost
potential_costs[h_type] += 1
for h_type, requirement in r_costs.items():
if requirement > 0 and potential_benefit[h_type] < requirement:
valid = False
break
if valid:
for h_type, requirement in r_exits.items():
if requirement > 0 and potential_costs[h_type] < requirement:
valid = False
break
if valid:
valid_candidates.append((cand, cand_list, cand_sector))
return valid_candidates
def can_enable_wanted(test_eq, wanted_candidates):
for wanted in wanted_candidates:
covered = True
key, cost_door = wanted.cost
if len(test_eq.benefit[key]) < 1:
covered = False
if covered:
return True
return False
def resolve_equation(equation, eq_list, sector, access_id, current_access, equations):
if not current_access.can_pay(equation.cost[0]):
raise GenerationException('Cannot pay for this connection')
current_access.adjust_for_equation(equation, sector)
eq_list.remove(equation)
reached_doors = set(current_access.reached_doors)
reached_doors.update(current_access.blocked_doors.keys())
for r_eq in list(eq_list):
all_benefits_met = r_eq.door in reached_doors
for key in Hook:
fringe_list = [x for x in r_eq.benefit[key] if x not in reached_doors]
r_eq.benefit[key] = fringe_list
if len(fringe_list) > 0:
all_benefits_met = False
if all_benefits_met:
eq_list.remove(r_eq)
if len(eq_list) == 0 and sector in equations.keys():
del equations[sector]
else:
for eq in eq_list:
eq.access_id = access_id
def find_free_equation(equations):
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.total_cost() <= 0:
return sector, eq_list, eq
return None, None, None
def copy_door_equations(builder, sector_list):
equations = {}
for sector in builder.sectors + sector_list:
if sector.equations is None:
# todo: sort equations?
sector.equations = calc_sector_equations(sector)
curr_list = equations[sector] = []
for equation in sector.equations:
curr_list.append(equation.copy())
return equations
def calc_sector_equations(sector):
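    """Build one DoorEquation per outstanding door. For non-destination entrance
    sectors, the entrance-flagged (free) doors are removed from the benefit
    lists of the other equations."""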
equations = []
is_entrance = sector.is_entrance_sector() and not sector.destination_entrance
if is_entrance:
flagged_equations = []
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, True)
if flag:
flagged_equations.append(equation)
equations.append(equation)
for flagged_equation in flagged_equations:
for equation in equations:
for key, door_list in equation.benefit.items():
if flagged_equation.door in door_list and flagged_equation != equation:
door_list.remove(flagged_equation.door)
else:
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, False)
equations.append(equation)
return equations
def calc_door_equation(door, sector, look_for_entrance):
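    """Derive a door's equation by BFS from its region while tracking crystal
    barrier state: reachable outstanding doors become benefits (crystal
    restrictions noted in crystal_blocked); an unblocked door in an entrance
    sector instead yields a free, required, pure-benefit equation."""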
if look_for_entrance and not door.blocked:
flag = sector.is_entrance_sector()
if flag:
eq = DoorEquation(door)
eq.benefit[hook_from_door(door)].append(door)
eq.required = True
eq.c_switch = door.crystal == CrystalBarrier.Either
# exceptions for long entrances ???
# if door.name in ['PoD Dark Alley']:
eq.entrance_flag = True
return eq, flag
eq = DoorEquation(door)
eq.required = door.blocked or door.dead
eq.cost = (hanger_from_door(door), door)
eq.entrance_flag = sector.is_entrance_sector()
if not door.stonewall:
start_region = door.entrance.parent_region
visited = {(start_region, CrystalBarrier.Null)}
queue = deque([(start_region, CrystalBarrier.Null)])
found_events = set()
event_doors = set()
while len(queue) > 0:
region, crystal_barrier = queue.popleft()
if region.crystal_switch and crystal_barrier == CrystalBarrier.Null:
eq.c_switch = True
crystal_barrier = CrystalBarrier.Either
# todo: backtracking from double switch with orange on--
for loc in region.locations:
if loc.name in dungeon_events:
found_events.add(loc.name)
for d in event_doors:
if loc.name == d.req_event:
connect = d.entrance.connected_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
for ext in region.exits:
d = ext.door
if d is not None:
if d.controller is not None:
d = d.controller
if d is not door and d in sector.outstanding_doors and not d.blocked:
eq_list = eq.benefit[hook_from_door(d)]
if d not in eq_list:
eq_list.append(d)
crystal_barrier = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
if crystal_barrier != CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
else:
eq.crystal_blocked[d] = crystal_barrier
elif d.crystal == CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
if d.req_event is not None and d.req_event not in found_events:
event_doors.add(d)
else:
connect = ext.connected_region if ext.door.controller is None else d.entrance.parent_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
if len(eq.benefit) == 0:
eq.required = True
return eq, False
def meets_crystal_requirement(current_crystal, requirement):
    if current_crystal == CrystalBarrier.Either:
        return True
    return current_crystal == requirement
def valid_crystal(door, current_crystal):
if door.crystal in [CrystalBarrier.Null, CrystalBarrier.Either]:
return True
if current_crystal in [CrystalBarrier.Either, CrystalBarrier.Null]:
return True
return door.crystal == current_crystal
# common functions - todo: move to a common place
def kth_combination(k, l, r):
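    """Return the k-th r-combination of l in lexicographic order,
    e.g. kth_combination(2, [1, 2, 3], 2) == [2, 3]."""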
if r == 0:
return []
elif len(l) == r:
return l
else:
i = ncr(len(l) - 1, r - 1)
if k < i:
return l[0:1] + kth_combination(k, l[1:], r - 1)
else:
return kth_combination(k - i, l[1:], r)
def ncr(n, r):
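    """Binomial coefficient: n choose r."""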
if r == 0:
return 1
r = min(r, n - r)
numerator = reduce(op.mul, range(n, n - r, -1), 1)
denominator = reduce(op.mul, range(1, r + 1), 1)
    return numerator // denominator  # integer division is exact here
dungeon_boss_sectors = {
'Hyrule Castle': [],
'Eastern Palace': ['Eastern Boss'],
'Desert Palace': ['Desert Boss'],
'Tower of Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull Boss'],
'Thieves Town': ['Thieves Blind\'s Cell', 'Thieves Boss'],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
default_dungeon_entrances = {
'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Rat Path',
'Sanctuary'],
'Eastern Palace': ['Eastern Lobby'],
'Desert Palace': ['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby'],
'Tower of Hera': ['Hera Lobby'],
'Agahnims Tower': ['Tower Lobby'],
'Palace of Darkness': ['PoD Lobby'],
'Swamp Palace': ['Swamp Lobby'],
'Skull Woods': ['Skull 1 Lobby', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull 2 East Lobby',
'Skull 2 West Lobby', 'Skull Back Drop', 'Skull 3 Lobby'],
'Thieves Town': ['Thieves Lobby'],
'Ice Palace': ['Ice Lobby'],
'Misery Mire': ['Mire Lobby'],
'Turtle Rock': ['TR Main Lobby', 'TR Eye Bridge', 'TR Big Chest Entrance', 'TR Lazy Eyes'],
'Ganons Tower': ['GT Lobby']
}
drop_entrances = {
'Hyrule Castle': ['Sewers Rat Path'],
'Eastern Palace': [],
'Desert Palace': [],
'Tower of Hera': [],
'Agahnims Tower': [],
'Palace of Darkness': [],
'Swamp Palace': [],
'Skull Woods': ['Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'],
'Thieves Town': [],
'Ice Palace': [],
'Misery Mire': [],
'Turtle Rock': [],
'Ganons Tower': []
}
# todo: calculate these for ER - the multi entrance dungeons anyway
dungeon_dead_end_allowance = {
'Hyrule Castle': 6,
'Eastern Palace': 1,
'Desert Palace': 2,
'Tower of Hera': 1,
'Agahnims Tower': 1,
'Palace of Darkness': 1,
'Swamp Palace': 1,
'Skull Woods': 3, # two allowed in skull 1, 1 in skull 3, 0 in skull 2
'Thieves Town': 1,
'Ice Palace': 1,
'Misery Mire': 1,
'Turtle Rock': 2, # this assumes one overworld connection
'Ganons Tower': 1,
'Desert Palace Back': 1,
'Desert Palace Main': 1,
'Skull Woods 1': 0,
'Skull Woods 2': 0,
'Skull Woods 3': 1,
}
drop_entrances_allowance = [
'Sewers Rat Path', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'
]
dead_entrances = [
'TR Big Chest Entrance'
]
split_check_entrance_invalid = [
'Desert East Lobby', 'Skull 2 West Lobby'
]
dungeon_portals = {
'Hyrule Castle': ['Hyrule Castle South', 'Hyrule Castle West', 'Hyrule Castle East', 'Sanctuary'],
'Eastern Palace': ['Eastern'],
'Desert Palace': ['Desert Back', 'Desert South', 'Desert West', 'Desert East'],
'Tower of Hera': ['Hera'],
'Agahnims Tower': ['Agahnims Tower'],
'Palace of Darkness': ['Palace of Darkness'],
'Swamp Palace': ['Swamp'],
'Skull Woods': ['Skull 1', 'Skull 2 East', 'Skull 2 West', 'Skull 3'],
'Thieves Town': ['Thieves Town'],
'Ice Palace': ['Ice'],
'Misery Mire': ['Mire'],
'Turtle Rock': ['Turtle Rock Main', 'Turtle Rock Lazy Eyes', 'Turtle Rock Chest', 'Turtle Rock Eye Bridge'],
'Ganons Tower': ['Ganons Tower']
}
dungeon_drops = {
'Hyrule Castle': ['Sewers Rat Path'],
'Skull Woods': ['Skull Pot Circle', 'Skull Pinball', 'Skull Left Drop', 'Skull Back Drop'],
}
target_regions.add(region)
break
else:
for region in all_regions:
if region.name in target:
target_regions.add(region)
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(starting_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
for exp_door in original_state.unattached_doors:
if not exp_door.door.blocked:
            return True
    for target in target_regions:
if original_state.visited_at_all(target):
return True
return False
def determine_required_paths(world, player):
paths = {}
for name, builder in world.dungeon_layouts[player].items():
all_regions = builder.master_sector.regions
paths[name] = determine_paths_for_dungeon(world, player, all_regions, name)
return paths
boss_path_checks = ['Eastern Boss', 'Desert Boss', 'Hera Boss', 'Tower Agahnim 1', 'PoD Boss', 'Swamp Boss',
'Skull Boss', 'Ice Boss', 'Mire Boss', 'TR Boss', 'GT Agahnim 2']
# pinball is allowed to orphan you
drop_path_checks = ['Skull Pot Circle', 'Skull Left Drop', 'Skull Back Drop', 'Sewers Rat Path']
def determine_paths_for_dungeon(world, player, all_regions, name):
all_r_names = set(x.name for x in all_regions)
paths = []
non_hole_portals = []
for portal in world.dungeon_portals[player]:
if portal.door.entrance.parent_region in all_regions:
non_hole_portals.append(portal.door.entrance.parent_region.name)
if portal.destination:
paths.append(portal.door.entrance.parent_region.name)
if world.mode[player] == 'standard' and name == 'Hyrule Castle':
paths.append('Hyrule Dungeon Cellblock')
paths.append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic'] and name == 'Thieves Town':
paths.append('Thieves Attic Window')
elif 'Thieves Attic Window' in all_r_names:
paths.append('Thieves Attic Window')
for boss in boss_path_checks:
if boss in all_r_names:
paths.append(boss)
if 'Thieves Boss' in all_r_names:
paths.append('Thieves Boss')
paths.append(('Thieves Blind\'s Cell', 'Thieves Boss'))
for drop_check in drop_path_checks:
if drop_check in all_r_names:
paths.append((drop_check, non_hole_portals))
return paths
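# prunes hangers that can never attach: a hanger is dropped when no hook of the
# matching type exists, or when every matching hook originates from the hanger's
# own graph piece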
def winnow_hangers(hangers, hooks):
removal_info = []
for hanger, door_set in hangers.items():
for door in door_set:
hook_set = hooks[hanger]
if len(hook_set) == 0:
removal_info.append((hanger, door))
else:
found_valid = False
for door_hook, crystal, orig_hanger in hook_set:
if orig_hanger != door:
found_valid = True
break
if not found_valid:
removal_info.append((hanger, door))
for hanger, door in removal_info:
hangers[hanger].remove(door)
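# a stonewall door can only be crossed from one side. walk backwards from the
# far side of the proposed connection: reaching the stonewall's own region means
# a guaranteed loop around it, and reaching a non-dungeon entrance means the
# player could get stranded - both invalidate the stonewall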
def stonewall_valid(stonewall):
bad_door = stonewall.dest
if bad_door.blocked:
        return True
    loop_region = stonewall.entrance.parent_region
start_regions = [bad_door.entrance.parent_region]
if bad_door.dependents:
for dep in bad_door.dependents:
start_regions.append(dep.entrance.parent_region)
queue = deque(start_regions)
visited = set(start_regions)
while len(queue) > 0:
region = queue.popleft()
if region == loop_region:
return False # guaranteed loop
possible_entrances = list(region.entrances)
for entrance in possible_entrances:
parent = entrance.parent_region
if parent.type != RegionType.Dungeon:
return False # you can get stuck from an entrance
else:
door = entrance.door
if (door is None or (door != stonewall and not door.blocked)) and parent not in visited:
visited.add(parent)
queue.append(parent)
# we didn't find anything bad
return True
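# merges the orange and blue exploration states for a door into a GraphPiece:
# doors reachable under both barrier colors become Null (color-agnostic) hooks,
# mismatches are logged, and visited regions plus potential big key locations
# from both states are unioned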
def create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception):
graph_piece = GraphPiece()
all_unattached = {}
for exp_d in o_state.unattached_doors:
all_unattached[exp_d.door] = exp_d.crystal
for exp_d in b_state.unattached_doors:
d = exp_d.door
if d in all_unattached.keys():
if all_unattached[d] != exp_d.crystal:
if all_unattached[d] == CrystalBarrier.Orange and exp_d.crystal == CrystalBarrier.Blue:
all_unattached[d] = CrystalBarrier.Null
elif all_unattached[d] == CrystalBarrier.Blue and exp_d.crystal == CrystalBarrier.Orange:
logging.getLogger('').warning('Mismatched state @ %s (o:%s b:%s)', d.name, all_unattached[d],
exp_d.crystal)
elif all_unattached[d] == CrystalBarrier.Either:
                    all_unattached[d] = exp_d.crystal
        else:
all_unattached[exp_d.door] = exp_d.crystal
h_crystal = door.crystal if door is not None else None
for d, crystal in all_unattached.items():
        if (door is None or d != door) and (not d.blocked or exception(d)) and d not in proposed_map.keys():
graph_piece.hooks[d] = crystal
if d == door:
h_crystal = crystal
graph_piece.hanger_info = door
graph_piece.hanger_crystal = h_crystal
graph_piece.visited_regions.update(o_state.visited_blue)
graph_piece.visited_regions.update(o_state.visited_orange)
graph_piece.visited_regions.update(b_state.visited_blue)
graph_piece.visited_regions.update(b_state.visited_orange)
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(o_state.bk_found))
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(b_state.bk_found))
graph_piece.pinball_used = o_state.pinball_used or b_state.pinball_used
return graph_piece
def filter_for_potential_bk_locations(locations):
return [x for x in locations if
'- Big Chest' not in x.name and '- Prize' not in x.name and x.name not in dungeon_events
and not x.forced_item and x.name not in ['Agahnim 1', 'Agahnim 2']]
type_map = {
Hook.Stairs: Hook.Stairs,
Hook.North: Hook.South,
Hook.South: Hook.North,
Hook.West: Hook.East,
Hook.East: Hook.West
}
def opposite_h_type(h_type) -> Hook:
return type_map[h_type]
hang_dir_map = {
Direction.North: Hook.South,
Direction.South: Hook.North,
Direction.West: Hook.East,
Direction.East: Hook.West,
}
def hanger_from_door(door):
if door.type == DoorType.SpiralStairs:
return Hook.Stairs
if door.type in [DoorType.Normal, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
return hang_dir_map[door.direction]
return None
def connect_doors(a, b):
if a.type in [DoorType.Hole, DoorType.Warp, DoorType.Interior, DoorType.Logical]:
return
if a.type in [DoorType.Normal, DoorType.SpiralStairs, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
if a.blocked:
connect_one_way(b.entrance, a.entrance)
elif b.blocked:
connect_one_way(a.entrance, b.entrance)
else:
connect_two_way(a.entrance, b.entrance)
dep_doors, target = [], None
if len(a.dependents) > 0:
dep_doors, target = a.dependents, b
elif len(b.dependents) > 0:
dep_doors, target = b.dependents, a
if target is not None:
target_region = target.entrance.parent_region
for dep in dep_doors:
connect_simple_door(dep, target_region)
return
raise RuntimeError('Unknown door type ' + a.type.name)
def connect_two_way(entrance, ext):
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
ext.connect(entrance.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_one_way(entrance, ext):
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_simple_door(exit_door, region):
exit_door.entrance.connect(region)
exit_door.dest = region
special_big_key_doors = ['Hyrule Dungeon Cellblock Door', "Thieves Blind's Cell Door"]
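# tracks one traversal of the dungeon graph: regions visited under the orange
# and blue crystal states, doors that are available, waiting on events, or still
# unattached, plus location/key bookkeeping used by key-door validation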
class ExplorationState(object):
def __init__(self, init_crystal=CrystalBarrier.Orange, dungeon=None):
self.unattached_doors = []
self.avail_doors = []
self.event_doors = []
self.visited_orange = []
self.visited_blue = []
self.events = set()
self.crystal = init_crystal
# key region stuff
self.door_krs = {}
# key validation stuff
self.small_doors = []
self.big_doors = []
self.opened_doors = []
self.big_key_opened = False
self.big_key_special = False
self.found_locations = []
self.ttl_locations = 0
self.used_locations = 0
self.key_locations = 0
self.used_smalls = 0
self.bk_found = set()
self.non_door_entrances = []
self.dungeon = dungeon
self.pinball_used = False
def copy(self):
ret = ExplorationState(dungeon=self.dungeon)
ret.unattached_doors = list(self.unattached_doors)
ret.avail_doors = list(self.avail_doors)
ret.event_doors = list(self.event_doors)
ret.visited_orange = list(self.visited_orange)
ret.visited_blue = list(self.visited_blue)
ret.events = set(self.events)
ret.crystal = self.crystal
ret.door_krs = self.door_krs.copy()
ret.small_doors = list(self.small_doors)
ret.big_doors = list(self.big_doors)
ret.opened_doors = list(self.opened_doors)
ret.big_key_opened = self.big_key_opened
ret.big_key_special = self.big_key_special
ret.ttl_locations = self.ttl_locations
ret.key_locations = self.key_locations
ret.used_locations = self.used_locations
ret.used_smalls = self.used_smalls
ret.found_locations = list(self.found_locations)
ret.bk_found = set(self.bk_found)
ret.non_door_entrances = list(self.non_door_entrances)
ret.dungeon = self.dungeon
ret.pinball_used = self.pinball_used
return ret
def next_avail_door(self):
self.avail_doors.sort(key=lambda x: 0 if x.flag else 1 if x.door.bigKey else 2)
exp_door = self.avail_doors.pop()
self.crystal = exp_door.crystal
return exp_door
def visit_region(self, region, key_region=None, key_checks=False, bk_Flag=False):
if self.crystal == CrystalBarrier.Either:
if region not in self.visited_blue:
self.visited_blue.append(region)
if region not in self.visited_orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Blue:
self.visited_blue.append(region)
if region.type == RegionType.Dungeon:
for location in region.locations:
if key_checks and location not in self.found_locations:
if location.forced_item and 'Small Key' in location.item.name:
self.key_locations += 1
if location.name not in dungeon_events and '- Prize' not in location.name and location.name not in ['Agahnim 1', 'Agahnim 2']:
self.ttl_locations += 1
if location not in self.found_locations: # todo: special logic for TT Boss?
self.found_locations.append(location)
if not bk_Flag:
self.bk_found.add(location)
if location.name in dungeon_events and location.name not in self.events:
if self.flooded_key_check(location):
self.perform_event(location.name, key_region)
if location.name in flooded_keys_reverse.keys() and self.location_found(
flooded_keys_reverse[location.name]):
self.perform_event(flooded_keys_reverse[location.name], key_region)
def flooded_key_check(self, location):
if location.name not in flooded_keys.keys():
return True
return flooded_keys[location.name] in [x.name for x in self.found_locations]
def location_found(self, location_name):
for l in self.found_locations:
if l.name == location_name:
return True
return False
def perform_event(self, location_name, key_region):
self.events.add(location_name)
queue = collections.deque(self.event_doors)
while len(queue) > 0:
exp_door = queue.popleft()
if exp_door.door.req_event == location_name:
self.avail_doors.append(exp_door)
self.event_doors.remove(exp_door)
if key_region is not None:
d_name = exp_door.door.name
if d_name not in self.door_krs.keys():
self.door_krs[d_name] = key_region
def add_all_entrance_doors_check_unattached(self, region, world, player):
door_list = [x for x in get_doors(world, region, player) if x.type in [DoorType.Normal, DoorType.SpiralStairs]]
door_list.extend(get_entrance_doors(world, region, player))
for door in door_list:
if self.can_traverse(door):
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
for entrance in region.entrances:
door = world.check_for_door(entrance.name, player)
if door is None:
self.non_door_entrances.append(entrance)
def add_all_doors_check_unattached(self, region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller is not None:
door = door.controller
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def add_all_doors_check_proposed(self, region, proposed_map, valid_doors, flag, world, player, exception):
for door in get_doors(world, region, player):
if door.blocked and exception(door):
self.pinball_used = True
if self.can_traverse(door, exception):
if door.controller is not None:
door = door.controller
if door.dest is None and door not in proposed_map.keys() and door.name in valid_doors.keys():
if not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors, flag)
else:
other = self.find_door_in_list(door, self.unattached_doors)
if self.crystal != other.crystal:
other.crystal = CrystalBarrier.Either
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors, flag)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors, flag)
def add_all_doors_check_key_region(self, region, key_region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
else:
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
def add_all_doors_check_keys(self, region, key_door_proposal, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller:
door = door.controller
if door in key_door_proposal and door not in self.opened_doors:
if not self.in_door_list(door, self.small_doors):
self.append_door_to_list(door, self.small_doors)
elif (door.bigKey or door.name in special_big_key_doors) and not self.big_key_opened:
if not self.in_door_list(door, self.big_doors):
self.append_door_to_list(door, self.big_doors)
elif door.req_event is not None and door.req_event not in self.events:
if not self.in_door_list(door, self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def visited(self, region):
if self.crystal == CrystalBarrier.Either:
return region in self.visited_blue and region in self.visited_orange
elif self.crystal == CrystalBarrier.Orange:
return region in self.visited_orange
elif self.crystal == CrystalBarrier.Blue:
return region in self.visited_blue
return False
def visited_at_all(self, region):
return region in self.visited_blue or region in self.visited_orange
def found_forced_bk(self):
for location in self.found_locations:
if location.forced_big_key():
return True
return False
def can_traverse(self, door, exception=None):
if door.blocked:
return exception(door) if exception else False
if door.crystal not in [CrystalBarrier.Null, CrystalBarrier.Either]:
return self.crystal == CrystalBarrier.Either or door.crystal == self.crystal
return True
def count_locations_exclude_specials(self):
cnt = 0
for loc in self.found_locations:
if '- Big Chest' not in loc.name and '- Prize' not in loc.name and loc.name not in dungeon_events and not loc.forced_item:
cnt += 1
return cnt
def validate(self, door, region, world, player):
return self.can_traverse(door) and not self.visited(region) and valid_region_to_explore(region, self.dungeon,
world, player)
def in_door_list(self, door, door_list):
for d in door_list:
if d.door == door and d.crystal == self.crystal:
return True
return False
@staticmethod
def in_door_list_ic(door, door_list):
for d in door_list:
if d.door == door:
return True
return False
@staticmethod
def find_door_in_list(door, door_list):
for d in door_list:
if d.door == door:
return d
return None
def append_door_to_list(self, door, door_list, flag=False):
if door.crystal == CrystalBarrier.Null:
door_list.append(ExplorableDoor(door, self.crystal, flag))
else:
door_list.append(ExplorableDoor(door, door.crystal, flag))
def key_door_sort(self, d):
if d.door.smallKey:
if d.door in self.opened_doors:
return 1
else:
return 0
return 2
class ExplorableDoor(object):
def __init__(self, door, crystal, flag):
self.door = door
self.crystal = crystal
self.flag = flag
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s (%s)' % (self.door.name, self.crystal.name)
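# breadth-first exploration over doors: starting from search_regions, keep
# opening available doors (resolving unconnected ones through proposed_map),
# and skip big key doors while the big key is still unobtainable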
def extend_reachable_state_improved(search_regions, state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
local_state = state.copy()
for region in search_regions:
local_state.visit_region(region)
local_state.add_all_doors_check_proposed(region, proposed_map, valid_doors, False, world, player, exception)
while len(local_state.avail_doors) > 0:
explorable_door = local_state.next_avail_door()
if explorable_door.door.bigKey:
if bk_flag:
big_not_found = not special_big_key_found(local_state) if local_state.big_key_special else local_state.count_locations_exclude_specials() == 0
if big_not_found:
continue # we can't open this door
if explorable_door.door in proposed_map:
connect_region = world.get_entrance(proposed_map[explorable_door.door].name, player).parent_region
else:
connect_region = world.get_entrance(explorable_door.door.name, player).connected_region
if connect_region is not None:
if valid_region_to_explore_in_regions(connect_region, all_regions, world, player) and not local_state.visited(
connect_region):
flag = explorable_door.flag or explorable_door.door.bigKey
local_state.visit_region(connect_region, bk_Flag=flag)
local_state.add_all_doors_check_proposed(connect_region, proposed_map, valid_doors, flag, world, player, exception)
return local_state
def special_big_key_found(state):
for location in state.found_locations:
if location.forced_item and location.forced_item.bigkey:
return True
return False
def valid_region_to_explore_in_regions(region, all_regions, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region in all_regions)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def valid_region_to_explore(region, name, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region.dungeon.name in name)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def get_doors(world, region, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def get_dungeon_doors(region, world, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and ext.parent_region.type == RegionType.Dungeon:
res.append(door)
return res
def get_entrance_doors(world, region, player):
res = []
for ext in region.entrances:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def convert_regions(region_names, world, player):
region_list = []
for name in region_names:
region_list.append(world.get_region(name, player))
return region_list
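# accumulates the sectors chosen for one dungeon plus running totals (locations,
# dead ends, branches, connection supply/demand, crystal switch and big key
# requirements) used to judge whether the dungeon can still be wired up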
class DungeonBuilder(object):
def __init__(self, name):
self.name = name
self.sectors = []
self.location_cnt = 0
self.key_drop_cnt = 0
self.bk_required = False
self.bk_provided = False
self.c_switch_required = False
self.c_switch_present = False
self.c_locked = False
self.dead_ends = 0
self.branches = 0
self.forced_loops = 0
self.total_conn_lack = 0
self.conn_needed = defaultdict(int)
self.conn_supplied = defaultdict(int)
self.conn_balance = defaultdict(int)
self.mag_needed = {}
self.unfulfilled = defaultdict(int)
        self.all_entrances = None
        self.entrance_list = None
        self.layout_starts = None
        self.master_sector = None
        self.path_entrances = None
        self.split_flag = False
self.pre_open_stonewalls = set()
self.candidates = None
self.key_doors_num = None
self.combo_size = None
self.flex = 0
self.key_door_proposal = None
self.allowance = None
if 'Stonewall' in name:
self.allowance = 1
elif 'Prewall' in name:
orig_name = name[:-8]
if orig_name in dungeon_dead_end_allowance.keys():
self.allowance = dungeon_dead_end_allowance[orig_name]
if self.allowance is None:
self.allowance = 1
self.valid_proposal = None
self.split_dungeon_map = None
self.exception_list = []
def polarity_complement(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol.complement()
def polarity(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s' % self.name
def simple_dungeon_builder(name, sector_list):
define_sector_features(sector_list)
builder = DungeonBuilder(name)
dummy_pool = dict.fromkeys(sector_list)
global_pole = GlobalPolarity(dummy_pool)
for sector in sector_list:
assign_sector(sector, builder, dummy_pool, global_pole)
return builder
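# top-level sector shuffle: deals sectors out to the dungeon builders - bosses
# and entrances first, then location, crystal and polarity driven passes -
# retrying the whole assignment until it is globally valid (max 10 attempts)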
def create_dungeon_builders(all_sectors, connections_tuple, world, player,
dungeon_entrances=None, split_dungeon_entrances=None):
logger = logging.getLogger('')
logger.info('Shuffling Dungeon Sectors')
if dungeon_entrances is None:
dungeon_entrances = default_dungeon_entrances
if split_dungeon_entrances is None:
split_dungeon_entrances = split_region_starts
define_sector_features(all_sectors)
finished, dungeon_map, attempts = False, {}, 0
while not finished:
candidate_sectors = dict.fromkeys(all_sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map = {}
for key in dungeon_regions.keys():
dungeon_map[key] = DungeonBuilder(key)
for key in dungeon_boss_sectors.keys():
current_dungeon = dungeon_map[key]
for r_name in dungeon_boss_sectors[key]:
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon, candidate_sectors, global_pole)
if key == 'Hyrule Castle' and world.mode[player] == 'standard':
                for r_name in ['Hyrule Dungeon Cellblock', 'Sanctuary']:
                    assign_sector(find_sector(r_name, candidate_sectors), current_dungeon,
                                  candidate_sectors, global_pole)
entrances_map, potentials, connections = connections_tuple
accessible_sectors, reverse_d_map = set(), {}
for key in dungeon_entrances.keys():
current_dungeon = dungeon_map[key]
current_dungeon.all_entrances = dungeon_entrances[key]
for r_name in current_dungeon.all_entrances:
sector = find_sector(r_name, candidate_sectors)
assign_sector(sector, current_dungeon, candidate_sectors, global_pole)
if r_name in entrances_map[key]:
if sector:
accessible_sectors.add(sector)
else:
if not sector:
sector = find_sector(r_name, all_sectors)
reverse_d_map[sector] = key
if world.mode[player] == 'standard':
current_dungeon = dungeon_map['Hyrule Castle']
standard_stair_check(dungeon_map, current_dungeon, candidate_sectors, global_pole)
complete_dungeons = {x: y for x, y in dungeon_map.items() if sum(len(sector.outstanding_doors) for sector in y.sectors) <= 0}
[dungeon_map.pop(key) for key in complete_dungeons.keys()]
identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections,
dungeon_entrances, split_dungeon_entrances)
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
if world.mode[player] == 'open' and world.shuffle[player] not in ['crossed', 'insanity']:
sanc = find_sector('Sanctuary', candidate_sectors)
            if sanc:
                lw_builders = []
for name, portal_list in dungeon_portals.items():
for portal_name in portal_list:
if world.get_portal(portal_name, player).light_world:
lw_builders.append(dungeon_map[name])
break
sanc_builder = random.choice(lw_builders)
assign_sector(sanc, sanc_builder, candidate_sectors, global_pole)
free_location_sectors = {}
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.chest_locations > 0:
free_location_sectors[sector] = None
elif sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_location_sectors(dungeon_map, free_location_sectors, global_pole)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
try:
if not global_pole.is_valid(dungeon_map):
raise NeutralizingException('Either free location/crystal assignment is already globally invalid')
logger.info(world.fish.translate("cli", "cli", "balance.doors"))
builder_info = dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
dungeon_map.update(complete_dungeons)
finished = True
except (NeutralizingException, GenerationException) as e:
attempts += 1
logger.debug(f'Attempt {attempts} failed with {str(e)}')
if attempts >= 10:
raise Exception('Could not find a valid seed quickly, something is likely horribly wrong.', e)
return dungeon_map
def standard_stair_check(dungeon_map, dungeon, candidate_sectors, global_pole):
filtered_sectors = [x for x in candidate_sectors if any(y for y in x.outstanding_doors if not y.dead and y.type == DoorType.SpiralStairs)]
valid = False
while not valid:
chosen_sector = random.choice(filtered_sectors)
filtered_sectors.remove(chosen_sector)
valid = global_pole.is_valid_choice(dungeon_map, dungeon, [chosen_sector])
if valid:
assign_sector(chosen_sector, dungeon, candidate_sectors, global_pole)
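# fixed-point pass that marks destination entrances: an entrance whose only
# connection to the accessible overworld runs through another, already
# accessible entrance of the same dungeon (or split section) is flagged as a
# destination rather than an independent way in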
def identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections, dungeon_entrances, split_dungeon_entrances):
accessible_overworld, found_connections, explored = set(), set(), False
while not explored:
explored = True
for ent_name, region in connections.items():
if ent_name in found_connections:
continue
sector = find_sector(ent_name, reverse_d_map.keys())
if sector in accessible_sectors:
found_connections.add(ent_name)
                accessible_overworld.add(region)
                explored = False
elif region in accessible_overworld:
found_connections.add(ent_name)
accessible_sectors.add(sector)
explored = False
else:
d_name = reverse_d_map[sector]
if d_name not in split_dungeon_entrances:
for r_name in dungeon_entrances[d_name]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
elif d_name in split_dungeon_entrances.keys():
split_section = None
for split_name, split_list in split_dungeon_entrances[d_name].items():
if ent_name in split_list:
split_section = split_name
break
if split_section:
for r_name in split_dungeon_entrances[d_name][split_section]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
# todo: split version that adds allowance for potential entrances
def calc_allowance_and_dead_ends(builder, connections_tuple, world, player):
portals = world.dungeon_portals[player]
entrances_map, potentials, connections = connections_tuple
name = builder.name if not builder.split_flag else builder.name.rsplit(' ', 1)[0]
needed_connections = [x for x in builder.all_entrances if x not in entrances_map[name]]
starting_allowance = 0
used_sectors = set()
destination_entrances = [x.door.entrance.parent_region.name for x in portals if x.destination]
dead_ends = [x.door.entrance.parent_region.name for x in portals if x.deadEnd]
for entrance in entrances_map[name]:
sector = find_sector(entrance, builder.sectors)
if sector:
outflow_target = 0 if entrance not in drop_entrances_allowance else 1
if sector not in used_sectors and (sector.adj_outflow() > outflow_target or entrance in dead_ends):
if entrance not in destination_entrances:
starting_allowance += 1
else:
builder.branches -= 1
used_sectors.add(sector)
elif sector not in used_sectors:
if entrance in destination_entrances and sector.branches() > 0:
builder.branches -= 1
if entrance not in drop_entrances_allowance:
needed_connections.append(entrance)
builder.allowance = starting_allowance
for entrance in needed_connections:
sector = find_sector(entrance, builder.sectors)
if sector and sector not in used_sectors: # ignore things on same sector
is_destination = entrance in destination_entrances
connect_able = False
if entrance in connections.keys():
enabling_region = connections[entrance]
check_list = list(potentials[enabling_region])
if enabling_region.name in ['Desert Ledge', 'Desert Palace Entrance (North) Spot']:
alternate = 'Desert Palace Entrance (North) Spot' if enabling_region.name == 'Desert Ledge' else 'Desert Ledge'
if world.get_region(alternate, player) in potentials:
check_list.extend(potentials[world.get_region(alternate, player)])
connecting_entrances = [x for x in check_list if x != entrance and x not in dead_entrances and x not in drop_entrances_allowance]
connect_able = len(connecting_entrances) > 0
            if is_destination and sector.branches() == 0:
builder.dead_ends += 1
if is_destination and sector.branches() > 0:
builder.branches -= 1
if connect_able and not is_destination:
builder.allowance += 1
used_sectors.add(sector)
def define_sector_features(sectors):
for sector in sectors:
for region in sector.regions:
for loc in region.locations:
if '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']:
pass
elif loc.forced_item and 'Small Key' in loc.item.name:
sector.key_only_locations += 1
elif loc.forced_item and loc.forced_item.bigkey:
sector.bk_provided = True
elif loc.name not in dungeon_events and not loc.forced_item:
sector.chest_locations += 1
if '- Big Chest' in loc.name or loc.name in ["Hyrule Castle - Zelda's Chest",
"Thieves' Town - Blind's Cell"]:
sector.bk_required = True
for ext in region.exits:
door = ext.door
if door is not None:
if door.crystal == CrystalBarrier.Either:
sector.c_switch = True
elif door.crystal == CrystalBarrier.Orange:
sector.orange_barrier = True
elif door.crystal == CrystalBarrier.Blue:
sector.blue_barrier = True
if door.bigKey:
sector.bk_required = True
def assign_sector(sector, dungeon, candidate_sectors, global_pole):
if sector:
del candidate_sectors[sector]
global_pole.consume(sector)
assign_sector_helper(sector, dungeon)
def assign_sector_helper(sector, builder):
builder.sectors.append(sector)
builder.location_cnt += sector.chest_locations
builder.key_drop_cnt += sector.key_only_locations
if sector.c_switch:
builder.c_switch_present = True
if sector.blue_barrier:
builder.c_switch_required = True
if sector.bk_required:
builder.bk_required = True
if sector.bk_provided:
builder.bk_provided = True
count_conn_needed_supplied(sector, builder.conn_needed, builder.conn_supplied)
builder.dead_ends += sector.dead_ends()
builder.branches += sector.branches()
if sector in builder.exception_list:
builder.exception_list.remove(sector)
else:
if builder.split_dungeon_map:
builder.split_dungeon_map = None
if builder.valid_proposal:
builder.valid_proposal = None
def count_conn_needed_supplied(sector, conn_needed, conn_supplied):
for door in sector.outstanding_doors:
if (door.blocked or door.dead or sector.adj_outflow() <= 1) and not sector.is_entrance_sector():
conn_needed[hook_from_door(door)] += 1
        else:
            conn_supplied[hanger_from_door(door)] += 1
def find_sector(r_name, sectors):
for s in sectors:
if r_name in s.region_set():
return s
return None
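# distributes the chest-bearing sectors via weighted random choice biased
# toward dungeons below the average location count, re-rolling until every
# dungeon ends up with at least 5 locations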
def assign_location_sectors(dungeon_map, free_location_sectors, global_pole):
valid = False
choices = None
sector_list = list(free_location_sectors)
random.shuffle(sector_list)
while not valid:
choices, d_idx, totals = weighted_random_locations(dungeon_map, sector_list)
for i, sector in enumerate(sector_list):
choice = d_idx[choices[i].name]
totals[choice] += sector.chest_locations
valid = True
for d_name, idx in d_idx.items():
            if totals[idx] < 5:
                valid = False
                break
for i, choice in enumerate(choices):
builder = dungeon_map[choice.name]
assign_sector(sector_list[i], builder, free_location_sectors, global_pole)
def weighted_random_locations(dungeon_map, free_location_sectors):
population = []
ttl_assigned = 0
weights = []
totals = []
d_idx = {}
for i, dungeon_builder in enumerate(dungeon_map.values()):
population.append(dungeon_builder)
totals.append(dungeon_builder.location_cnt)
ttl_assigned += dungeon_builder.location_cnt
weights.append(6.375)
d_idx[dungeon_builder.name] = i
    average = ttl_assigned / 13  # 13 dungeons total
for i, db in enumerate(population):
if db.location_cnt < average:
weights[i] += average - db.location_cnt
if db.location_cnt > average:
weights[i] = max(0, weights[i] - db.location_cnt + average)
choices = random.choices(population, weights, k=len(free_location_sectors))
return choices, d_idx, totals
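# gives a crystal switch sector to every dungeon that needs one (blue barriers
# present, no switch yet). with assign_one, places at least one switch somewhere
# even if no dungeon strictly requires it. returns the leftover switch sectors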
def assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole, assign_one=False):
population = []
some_c_switches_present = False
for name, builder in dungeon_map.items():
if builder.c_switch_required and not builder.c_switch_present and not builder.c_locked:
population.append(name)
if builder.c_switch_present and not builder.c_locked:
some_c_switches_present = True
    if len(population) == 0:
        if assign_one and not some_c_switches_present:
            if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign. Ref %s' % next(iter(dungeon_map.keys())))
valid, builder_choice, switch_choice = False, None, None
switch_candidates = list(crystal_switches)
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = [name for name, builder in dungeon_map.items() if not builder.c_locked]
while not valid:
if len(builder_candidates) == 0:
if len(switch_candidates) == 0:
raise GenerationException('No where to assign crystal switch. Ref %s' % next(iter(dungeon_map.keys())))
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = list(dungeon_map.keys())
choice = random.choice(builder_candidates)
builder_candidates.remove(choice)
builder_choice = dungeon_map[choice]
test_set = [switch_choice]
test_set.extend(crystal_barriers)
valid = global_pole.is_valid_choice(dungeon_map, builder_choice, test_set)
assign_sector(switch_choice, builder_choice, crystal_switches, global_pole)
return crystal_switches
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign')
sector_list = list(crystal_switches)
if len(population) > len(sector_list):
raise GenerationException('Not enough crystal switch sectors for those needed')
choices = random.sample(sector_list, k=len(population))
for i, choice in enumerate(choices):
builder = dungeon_map[population[i]]
assign_sector(choice, builder, crystal_switches, global_pole)
return crystal_switches
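# for each dungeon that needs its crystal switch, verify an entrance sector can
# actually reach a switch through doors not blocked behind CrystalBarrier.Blue;
# if not, pull in polarized/switch/barrier sectors until one connects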
def ensure_crystal_switches_reachable(dungeon_map, crystal_switches, polarized_sectors, crystal_barriers, global_pole):
invalid_builders = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and builder.c_switch_required and not builder.c_locked:
invalid_builders.append(builder)
while len(invalid_builders) > 0:
valid_builders = []
for builder in invalid_builders:
entrance_sectors = []
reachable_crystals = defaultdict()
for sector in builder.sectors:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
if sector.is_entrance_sector() and not sector.destination_entrance:
need_switch = True
for region in sector.get_start_regions():
if region.crystal_switch:
need_switch = False
break
any_benefit = False
for eq in sector.equations:
if len(eq.benefit) > 0:
any_benefit = True
break
if need_switch and any_benefit:
entrance_sectors.append(sector)
for eq in sector.equations:
if eq.c_switch:
reachable_crystals[hook_from_door(eq.door)] = True
valid_ent_sectors = []
for entrance_sector in entrance_sectors:
other_sectors = [x for x in builder.sectors if x != entrance_sector]
reachable, access = is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors)
if reachable:
valid_ent_sectors.append(entrance_sector)
else:
candidates = {}
for c in find_pol_cand_for_c_switch(access, reachable_crystals, polarized_sectors):
candidates[c] = 'Polarized'
for c in find_crystal_cand(access, crystal_switches):
candidates[c] = 'Crystals'
for c in find_pol_cand_for_c_switch(access, reachable_crystals, crystal_barriers):
candidates[c] = 'Barriers'
valid, sector, which_list = False, None, None
while not valid:
if len(candidates) <= 0:
raise GenerationException(f'need to provide more sophisticated crystal connection for {entrance_sector}')
sector, which_list = random.choice(list(candidates.items()))
del candidates[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
if which_list == 'Polarized':
assign_sector(sector, builder, polarized_sectors, global_pole)
elif which_list == 'Crystals':
assign_sector(sector, builder, crystal_switches, global_pole)
elif which_list == 'Barriers':
assign_sector(sector, builder, crystal_barriers, global_pole)
entrance_sectors = [x for x in entrance_sectors if x not in valid_ent_sectors]
if len(entrance_sectors) == 0:
valid_builders.append(builder)
invalid_builders = [x for x in invalid_builders if x not in valid_builders]
def is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors):
current_access = {}
for eq in entrance_sector.equations:
if eq.total_cost() <= 0:
for key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
current_access[key] = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
changed = True
while changed:
changed = False
for sector in other_sectors:
for eq in sector.equations:
key, cost_door = eq.cost
if key in current_access.keys() and current_access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
block_dict = eq.crystal_blocked
if door not in block_dict.keys() or block_dict[door] != CrystalBarrier.Blue:
if bene_key not in current_access.keys():
current_access[bene_key] = True
changed = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
return False, current_access
def find_pol_cand_for_c_switch(access, reachable_crystals, polarized_candidates):
candidates = []
for sector in polarized_candidates:
if pol_cand_matches_access_reach(sector, access, reachable_crystals):
candidates.append(sector)
return candidates
def pol_cand_matches_access_reach(sector, access, reachable_crystals):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
if opposite_h_type(bene_key) in reachable_crystals.keys():
return True
return False
def find_crystal_cand(access, crystal_switches):
candidates = []
for sector in crystal_switches:
if crystal_cand_matches_access(sector, access):
candidates.append(sector)
return candidates
def crystal_cand_matches_access(sector, access):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key] and eq.c_switch and len(sector.outstanding_doors) > 1:
return True
return False
def assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole):
population = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and not builder.c_locked:
population.append(name)
sector_list = list(crystal_barriers)
random.shuffle(sector_list)
choices = random.choices(population, k=len(sector_list))
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
assign_sector(sector_list[i], builder, crystal_barriers, global_pole)
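# polarity: sectors expose doors on alignment slots that must pair up across
# sectors. flags builders containing a sector whose occupied slots no other
# sector in that dungeon can mate with; mag_needed records which slots a
# bridging sector would have to provide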
def identify_polarity_issues(dungeon_map):
unconnected_builders = {}
for name, builder in dungeon_map.items():
identify_polarity_issues_internal(name, builder, unconnected_builders)
return unconnected_builders
def identify_polarity_issues_internal(name, builder, unconnected_builders):
if len(builder.sectors) == 1:
return
else:
def sector_filter(x, y):
return x != y
connection_flags = {}
for slot in PolSlot:
connection_flags[slot] = {}
for slot2 in PolSlot:
connection_flags[slot][slot2] = False
for sector in builder.sectors:
others = [x for x in builder.sectors if sector_filter(x, sector)]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
check_flags(sector_mag, connection_flags)
unconnected_sector = True
for i in PolSlot:
if sector_mag[i.value] == 0 or other_mag[i.value] > 0 or self_connecting(sector, i, sector_mag):
unconnected_sector = False
break
if unconnected_sector:
for i in PolSlot:
if sector_mag[i.value] > 0 and other_mag[i.value] == 0 and not self_connecting(sector, i, sector_mag):
builder.mag_needed[i] = [x for x in PolSlot if other_mag[x.value] > 0]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
ttl_mag = sum_magnitude(builder.sectors)
for slot in PolSlot:
for slot2 in PolSlot:
if ttl_mag[slot.value] > 0 and ttl_mag[slot2.value] > 0 and not connection_flags[slot][slot2]:
builder.mag_needed[slot] = [slot2]
builder.mag_needed[slot2] = [slot]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
def self_connecting(sector, slot, magnitude):
return sector.polarity()[slot.value] == 0 and sum(magnitude) > magnitude[slot.value]
def check_flags(sector_mag, connection_flags):
for slot in PolSlot:
for slot2 in PolSlot:
if sector_mag[slot.value] > 0 and sector_mag[slot2.value] > 0:
connection_flags[slot][slot2] = True
if slot != slot2:
                    for check_slot in PolSlot:
                        if check_slot not in [slot, slot2] and connection_flags[slot2][check_slot]:
connection_flags[slot][check_slot] = True
connection_flags[check_slot][slot] = True
def identify_simple_branching_issues(dungeon_map):
problem_builders = {}
for name, builder in dungeon_map.items():
        if name == 'Skull Woods 2':  # adjust for the one-way Skull Pinball WS exception
            builder.conn_supplied[Hook.West] += 1
            builder.conn_needed[Hook.East] -= 1
builder.forced_loops = calc_forced_loops(builder.sectors)
if builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance:
problem_builders[name] = builder
for h_type in Hook:
lack = builder.conn_balance[h_type] = builder.conn_supplied[h_type] - builder.conn_needed[h_type]
if lack < 0:
builder.total_conn_lack += -lack
problem_builders[name] = builder
return problem_builders
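# counts connections forced to loop back into their own sector: when a sector
# exposes more hooks of one type than all other sectors can hang on, the excess
# must pair up internally, and each such loop eats into the branch budget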
def calc_forced_loops(sector_list):
forced_loops = 0
for sector in sector_list:
h_mag = sector.hook_magnitude()
other_sectors = [x for x in sector_list if x != sector]
other_mag = sum_hook_magnitude(other_sectors)
loop_parts = 0
for hook in Hook:
opp = opposite_h_type(hook).value
if h_mag[hook.value] > other_mag[opp] and loop_present(hook, opp, h_mag, other_mag):
loop_parts += (h_mag[hook.value] - other_mag[opp]) / 2
forced_loops += math.floor(loop_parts)
return forced_loops
def loop_present(hook, opp, h_mag, other_mag):
if hook == Hook.Stairs:
return h_mag[hook.value] - other_mag[opp] >= 2
else:
return h_mag[opp] >= h_mag[hook.value] - other_mag[opp]
def is_satisfied(door_dict_list):
for door_dict in door_dict_list:
for door_list in door_dict.values():
if len(door_list) > 0:
return False
return True
def filter_match_deps(candidate, match_deps):
return [x for x in match_deps if x != candidate]
def sum_magnitude(sector_list):
result = [0] * len(PolSlot)
for sector in sector_list:
vector = sector.magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_hook_magnitude(sector_list):
result = [0] * len(Hook)
for sector in sector_list:
vector = sector.hook_magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_polarity(sector_list):
pol = Polarity()
for sector in sector_list:
pol += sector.polarity()
return pol
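# multi-phase placement of the polarity-unbalanced sectors: first repair
# connectedness, then simple branch shortages, then charge parity and full
# neutralization (polarity_step_3), and finally distribute the remaining
# neutral bundles, brute-forcing combinations when the space is small enough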
def assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info):
unconnected_builders = identify_polarity_issues(dungeon_map)
while len(unconnected_builders) > 0:
for name, builder in unconnected_builders.items():
candidates = find_connection_candidates(builder.mag_needed, polarized_sectors)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Cannot find a candidate for connectedness. %s' % name)
sector = random.choice(candidates)
candidates.remove(sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.mag_needed = {}
unconnected_builders = identify_polarity_issues(unconnected_builders)
problem_builders = identify_simple_branching_issues(dungeon_map)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates, charges = find_simple_branching_candidates(builder, polarized_sectors)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
valid, choice = False, None
while not valid:
if len(best_candidates) == 0:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Simple branch problems: %s' % name)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
choice = random.choice(best_candidates)
best_candidates.remove(choice)
i = candidates.index(choice)
candidates.pop(i)
charges.pop(i)
valid = global_pole.is_valid_choice(dungeon_map, builder, [choice]) and valid_connected_assignment(builder, [choice])
assign_sector(choice, builder, polarized_sectors, global_pole)
builder.total_conn_lack = 0
builder.conn_balance.clear()
problem_builders = identify_simple_branching_issues(problem_builders)
polarity_step_3(dungeon_map, polarized_sectors, global_pole)
neutral_choices: List[List] = neutralize_the_rest(polarized_sectors)
problem_builders = identify_branching_issues(dungeon_map, builder_info)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates = find_branching_candidates(builder, neutral_choices, builder_info)
valid, choice = False, None
while not valid:
if len(candidates) <= 0:
raise GenerationException('Cross Dungeon Builder: Complex branch problems: %s' % name)
choice = random.choice(candidates)
candidates.remove(choice)
valid = global_pole.is_valid_choice(dungeon_map, builder, choice) and valid_polarized_assignment(builder, choice)
neutral_choices.remove(choice)
for sector in choice:
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.unfulfilled.clear()
problem_builders = identify_branching_issues(problem_builders, builder_info)
comb_w_replace = len(dungeon_map) ** len(neutral_choices)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_choices)))
random.shuffle(combinations)
tries = 0
while len(polarized_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_choices))
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].extend(neutral_choices[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
for sector in neutral_choices[i]:
assign_sector(sector, builder, polarized_sectors, global_pole)
tries += 1
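# charge parity fix: each dungeon's total polarity charge must end up even, so
# every odd-charged builder receives an odd-charged sector group, sampled to
# minimize total charge while keeping the global pool valid, before handing off
# to parallel_full_neutralization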
def polarity_step_3(dungeon_map, polarized_sectors, global_pole):
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
grouped_choices: List[List] = find_forced_groupings(polarized_sectors, dungeon_map)
random.shuffle(odd_builders)
odd_candidates = find_odd_sectors(grouped_choices)
tries = 0
while len(odd_builders) > 0:
if tries > 1000:
raise GenerationException('Unable to fix dungeon parity. Ref: %s' % next(iter(odd_builders)).name)
best_choices = None
best_charge = sum([x.polarity().charge() for x in dungeon_map.values()])
samples = 0
combos = ncr(len(odd_candidates), len(odd_builders))
sample_target = 100 if combos > 10 else combos * 2
while best_choices is None or samples < sample_target:
samples += 1
if len(odd_candidates) < len(odd_builders):
raise GenerationException(f'Unable to fix dungeon parity - not enough candidates.'
f' Ref: {next(iter(odd_builders)).name}')
choices = random.sample(odd_candidates, k=len(odd_builders))
valid = global_pole.is_valid_multi_choice(dungeon_map, odd_builders, choices)
charge = calc_total_charge(dungeon_map, odd_builders, choices)
if valid and charge < best_charge:
best_choices = choices
best_charge = charge
if samples > sample_target and best_choices is None:
best_choices = choices
best_charge = charge
samples = 0
all_valid = True
for i, candidate_list in enumerate(best_choices):
test_set = find_forced_connections(dungeon_map, candidate_list, polarized_sectors)
builder = odd_builders[i]
if ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
all_valid &= valid_branch_only(builder, candidate_list)
else:
all_valid = False
break
if not all_valid:
break
if all_valid:
for i, candidate_list in enumerate(best_choices):
builder = odd_builders[i]
for sector in candidate_list:
assign_sector(sector, builder, polarized_sectors, global_pole)
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
else:
tries += 1
parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole)
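# neutralizes all builders at once: searches at increasing combination depth for
# sector bundles that exactly cancel each builder's residual polarity, checking
# every tentative pick against the global pole before committing it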
def parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole):
start = time.process_time()
builders = list(dungeon_map.values())
finished = all([x.polarity().is_neutral() for x in builders])
solution_list, current_depth = defaultdict(list), 1
complete_builders = [x for x in builders if x.polarity().is_neutral()]
avail_sectors = list(polarized_sectors)
while not finished:
builders_to_check = [x for x in builders if not (x.polarity()+sum_polarity(solution_list[x])).is_neutral()]
candidates, last_depth = find_exact_neutralizing_candidates_parallel_db(builders_to_check, solution_list,
avail_sectors, current_depth)
increment_depth = True
any_valid = False
for builder, candidate_list in candidates.items():
valid, sectors = False, None
while not valid:
if len(candidate_list) == 0:
                    increment_depth = False
                    break
sectors = random.choice(candidate_list)
candidate_list.remove(sectors)
proposal = solution_list.copy()
proposal[builder] = list(proposal[builder])
proposal[builder].extend(sectors)
valid = global_pole.is_valid_multi_choice_2(dungeon_map, builders, proposal)
if valid:
any_valid = True
solution_list[builder].extend(sectors)
for sector in sectors:
avail_sectors.remove(sector)
complete_builders.append(builder)
for other_builder, other_cand_list in candidates.items():
if other_builder not in complete_builders:
candidates_to_remove = list()
for candidate in other_cand_list:
for sector in sectors:
if sector in candidate:
candidates_to_remove.append(candidate)
break
other_cand_list[:] = [x for x in other_cand_list if x not in candidates_to_remove]
if not any_valid:
increment_depth = True
current_depth = last_depth + 1 if increment_depth else last_depth
finished = all([(x.polarity()+sum_polarity(solution_list[x])).is_neutral() for x in builders])
logging.getLogger('').info(f'-Balanced solution found in {time.process_time()-start}')
for builder, sectors in solution_list.items():
for sector in sectors:
assign_sector(sector, builder, polarized_sectors, global_pole)
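# Expands a candidate list with any sector forced by hook parity: if a candidate exposes
# a hook whose opposite exists nowhere else (neither among the other candidates nor in
# any dungeon) and exactly one unassigned sector can supply it, that sector joins the set.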
def find_forced_connections(dungeon_map, candidate_list, polarized_sectors):
test_set = list(candidate_list)
other_sectors = [x for x in polarized_sectors if x not in candidate_list]
dungeon_hooks = defaultdict(int)
for name, builder in dungeon_map.items():
d_mag = sum_hook_magnitude(builder.sectors)
for val in Hook:
dungeon_hooks[val] += d_mag[val.value]
queue = deque(candidate_list)
while queue:
candidate = queue.popleft()
c_mag = candidate.hook_magnitude()
other_candidates = [x for x in candidate_list if x != candidate]
for val in Hook:
if c_mag[val.value] > 0:
opp = opposite_h_type(val)
o_val = opp.value
if sum_hook_magnitude(other_candidates)[o_val] == 0 and dungeon_hooks[opp] == 0 and not valid_self(c_mag, val, opp):
forced_sector = []
for sec in other_sectors:
if sec.hook_magnitude()[o_val] > 0:
forced_sector.append(sec)
if len(forced_sector) > 1:
break
if len(forced_sector) == 1:
test_set.append(forced_sector[0])
return test_set
def valid_self(c_mag, val, opp):
if val == Hook.Stairs:
return c_mag[val.value] > 2
else:
return c_mag[opp.value] > 0 and sum(c_mag) > 2
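# Grows test_set with branching, connected sectors until builder + test_set forms a valid
# connected assignment; returns False if the candidate pool is exhausted.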
def ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
test_copy = list(test_set)
while not valid_connected_assignment(builder, test_copy):
dummy_builder = DungeonBuilder("Dummy Builder for " + builder.name)
dummy_builder.sectors = builder.sectors + test_copy
possibles = [x for x in polarized_sectors if x not in test_copy]
candidates = find_connected_candidates(possibles)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
return False
sector = random.choice(candidates)
candidates.remove(sector)
t2 = test_copy+[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, t2) and valid_branch_only(builder, t2)
test_copy.append(sector)
dummy_builder.sectors = builder.sectors + test_copy
test_set[:] = test_copy
return True
def calc_total_charge(dungeon_map, builders, sector_lists):
polarity_list = [x.polarity() for x in dungeon_map.values() if x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
polarity = builder.polarity() + sum_polarity(sectors)
polarity_list.append(polarity)
return sum([x.charge() for x in polarity_list])
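# Tracks global totals over the unassigned sector pool: positive/negative polarity per
# axis plus even/odd charge counts, so tentative assignments can be vetoed before they
# make the remaining pool unsolvable.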
class GlobalPolarity:
def __init__(self, candidate_sectors):
self.positives = [0, 0, 0]
self.negatives = [0, 0, 0]
self.evens = 0
self.odds = 0
for sector in candidate_sectors:
pol = sector.polarity()
if pol.charge() % 2 == 0:
self.evens += 1
else:
self.odds += 1
for slot in PolSlot:
if pol.vector[slot.value] < 0:
self.negatives[slot.value] += -pol.vector[slot.value]
elif pol.vector[slot.value] > 0:
self.positives[slot.value] += pol.vector[slot.value]
def copy(self):
gp = GlobalPolarity([])
gp.positives = self.positives.copy()
gp.negatives = self.negatives.copy()
gp.evens = self.evens
gp.odds = self.odds
return gp
def is_valid(self, dungeon_map):
polarities = [x.polarity() for x in dungeon_map.values()]
return self._check_parity(polarities) and self._is_valid_polarities(polarities)
def _check_parity(self, polarities):
local_evens = 0
local_odds = 0
for pol in polarities:
if pol.charge() % 2 == 0:
local_evens += 1
else:
local_odds += 1
if local_odds > self.odds:
return False
return True
def _is_valid_polarities(self, polarities):
positives = self.positives.copy()
negatives = self.negatives.copy()
for polarity in polarities:
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if negatives[slot.value] >= polarity[slot.value]:
negatives[slot.value] -= polarity[slot.value]
else:
return False
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if positives[slot.value] >= -polarity[slot.value]:
positives[slot.value] += polarity[slot.value]
else:
return False
elif slot == PolSlot.Stairs:
if positives[slot.value] >= polarity[slot.value]:
positives[slot.value] -= polarity[slot.value]
else:
return False
return True
def consume(self, sector):
polarity = sector.polarity()
if polarity.charge() % 2 == 0:
self.evens -= 1
else:
self.odds -= 1
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if self.negatives[slot.value] >= -polarity[slot.value]:
self.negatives[slot.value] += polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif slot == PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
def is_valid_choice(self, dungeon_map, builder, sectors):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral() and x != builder]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice(self, dungeon_map, builders, sector_lists):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice_2(self, dungeon_map, builders, sector_dict):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for builder, sectors in sector_dict.items():
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def find_connection_candidates(mag_needed, sector_pool):
candidates = []
for sector in sector_pool:
if sector.branching_factor() < 2:
continue
mag = sector.magnitude()
matches = False
for slot, match_slot in mag_needed.items():
if mag[slot.value] > 0:
for i in PolSlot:
if i in match_slot and mag[i.value] > 0:
matches = True
break
if matches:
candidates.append(sector)
return candidates
def find_simple_branching_candidates(builder, sector_pool):
candidates = defaultdict(list)
charges = defaultdict(list)
outflow_needed = builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance
total_needed = builder.dead_ends + builder.forced_loops * 2 - builder.branches + builder.allowance
original_lack = builder.total_conn_lack
best_lack = original_lack
for sector in sector_pool:
if outflow_needed and sector.branching_factor() <= 2:
continue
calc_sector_balance(sector)
ttl_lack = 0
for hook in Hook:
lack = builder.conn_balance[hook] + sector.conn_balance[hook]
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + [sector])
net_outflow = builder.dead_ends + forced_loops * 2 + sector.dead_ends() - builder.branches - builder.allowance - sector.branches()
valid_branches = net_outflow < total_needed
if valid_branches and (ttl_lack < original_lack or original_lack >= 0):
candidates[ttl_lack].append(sector)
charges[ttl_lack].append((builder.polarity()+sector.polarity()).charge())
if ttl_lack < best_lack:
best_lack = ttl_lack
if best_lack == original_lack and not outflow_needed:
raise GenerationException('These candidates may not help at all')
if len(candidates[best_lack]) <= 0:
raise GenerationException('Nothing can fix the simple branching issue. Panic ensues.')
return candidates[best_lack], charges[best_lack]
def calc_sector_balance(sector):
    if sector.conn_balance is None:
sector.conn_balance = defaultdict(int)
for door in sector.outstanding_doors:
if door.blocked or door.dead or sector.branching_factor() <= 1:
sector.conn_balance[hook_from_door(door)] -= 1
else:
sector.conn_balance[hanger_from_door(door)] += 1
def find_odd_sectors(grouped_candidates):
return [x for x in grouped_candidates if sum_polarity(x).charge() % 2 != 0]
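# For each unbalanced builder, looks up sector combinations in the polarity database whose
# sum exactly complements the builder's current polarity, deepening the database until a
# usable (weeded) candidate list exists for at least one builder.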
def find_exact_neutralizing_candidates_parallel_db(builders, proposal, avail_sectors, current_depth):
candidate_map = defaultdict(list)
polarity_map = {}
for builder in builders:
polarity_map[builder] = builder.polarity() + sum_polarity(proposal[builder])
finished = False
db, index = create_db_for_depth(current_depth, avail_sectors)
while not finished:
depth_map = db[current_depth]
for builder in builders:
target = polarity_map[builder].complement()
if target in depth_map.keys():
finished = True
candidate_map[builder].extend(depth_map[target].keys())
if finished:
for builder in list(candidate_map.keys()):
try:
candidate_map[builder] = weed_candidates(builder, {0: candidate_map[builder]}, 0)
except NeutralizingException:
del candidate_map[builder]
if len(candidate_map) == 0:
finished = False
if not finished:
current_depth += 1
add_depth_to_db(db, index, current_depth, avail_sectors)
return candidate_map, current_depth
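# The database maps depth -> polarity -> {OrderedFrozenSet: None}; depth 0 seeds it with
# the neutral polarity and the empty sector set.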
def create_db_for_depth(depth, avail_sectors):
db = {0: {Polarity(): {OrderedFrozenSet(): None}}}
db_index = {Polarity()}
for i in range(1, depth+1):
add_depth_to_db(db, db_index, i, avail_sectors)
return db, db_index
def add_depth_to_db(db, db_index, i, avail_sectors):
previous = db[i-1]
depth_map = defaultdict(dict)
index_additions = set()
for sector in avail_sectors:
sector_set = {sector}
sector_pol = sector.polarity()
for polarity, choices in previous.items():
combo_pol = sector_pol + polarity
if combo_pol not in db_index:
index_additions.add(combo_pol)
for choice in choices:
if sector in choice.frozen_set:
continue
new_set = choice.new_with_element(sector_set)
depth_map[combo_pol][new_set] = None
for addition in index_additions:
if len(depth_map[addition]) > 0:
db_index.add(addition)
else:
del depth_map[addition]
if len(depth_map) == 0:
        raise NeutralizingException('There is not a solution for this particular combination. Crystal switch issue?')
    db[i] = depth_map
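# A set with stable insertion order: equality and hashing delegate to the frozenset,
# while iteration follows the order in which elements were added.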
class OrderedFrozenSet:
def __init__(self):
self.frozen_set = frozenset()
self.order = []
def __eq__(self, other):
return self.frozen_set == other.frozen_set
def __hash__(self):
return hash(self.frozen_set)
def __iter__(self):
return self.order.__iter__()
def __len__(self):
return len(self.order)
def new_with_element(self, elements):
ret = OrderedFrozenSet()
ret.frozen_set = frozenset(self.frozen_set | elements)
ret.order = list(self.order)
ret.order.extend(elements)
return ret
def weed_candidates(builder, candidates, best_charge):
official_cand = []
while len(official_cand) == 0:
if len(candidates.keys()) == 0:
raise NeutralizingException('Cross Dungeon Builder: Weeded out all candidates %s' % builder.name)
while best_charge not in candidates.keys():
best_charge += 1
candidate_list = candidates.pop(best_charge)
best_lack = None
for cand in candidate_list:
ttl_deads = 0
ttl_branches = 0
for sector in cand:
calc_sector_balance(sector)
ttl_deads += sector.dead_ends()
ttl_branches += sector.branches()
ttl_lack = 0
ttl_balance = 0
for hook in Hook:
bal = 0
for sector in cand:
bal += sector.conn_balance[hook]
lack = builder.conn_balance[hook] + bal
ttl_balance += lack
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + list(cand))
if ttl_balance >= 0 and builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance:
if best_lack is None or ttl_lack < best_lack:
best_lack = ttl_lack
official_cand = [cand]
elif ttl_lack == best_lack:
official_cand.append(cand)
    # choose from among those that use the fewest sectors
best_len = None
cand_len = []
for cand in official_cand:
size = len(cand)
if best_len is None or size < best_len:
best_len = size
cand_len = [cand]
elif size == best_len:
cand_len.append(cand)
return cand_len
def find_branching_candidates(builder, neutral_choices, builder_info):
candidates = []
for choice in neutral_choices:
resolved, problem_list = check_for_valid_layout(builder, choice, builder_info)
if resolved:
candidates.append(choice)
return candidates
def find_connected_candidates(sector_pool):
candidates = []
for sector in sector_pool:
if sector.adj_outflow() >= 2:
candidates.append(sector)
return candidates
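# Randomly partitions the leftover sectors into polarity-neutral groups: pick a seed
# sector, scan r-sized combinations of the pool for one whose polarity cancels it, and
# grow r whenever the current size yields no pairing.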
def neutralize_the_rest(sector_pool):
neutral_choices = []
main_pool = list(sector_pool)
failed_pool = []
r_size = 1
while len(main_pool) > 0 or len(failed_pool) > 0:
if len(main_pool) <= r_size:
main_pool.extend(failed_pool)
failed_pool.clear()
r_size += 1
candidate = random.choice(main_pool)
main_pool.remove(candidate)
if r_size > len(main_pool):
raise GenerationException("Cross Dungeon Builder: no more neutral pairings possible")
combinations = ncr(len(main_pool), r_size)
itr = 0
done = False
while not done:
ttl_polarity = candidate.polarity()
choice_set = kth_combination(itr, main_pool, r_size)
for choice in choice_set:
ttl_polarity += choice.polarity()
if ttl_polarity.is_neutral():
choice_set.append(candidate)
neutral_choices.append(choice_set)
main_pool = [x for x in main_pool if x not in choice_set]
failed_pool = [x for x in failed_pool if x not in choice_set]
done = True
else:
itr += 1
if itr >= combinations:
failed_pool.append(candidate)
done = True
return neutral_choices
# doesn't force a grouping when everything in the found_list comes from the same sector
def find_forced_groupings(sector_pool, dungeon_map):
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = categorize_groupings(builder.sectors)
groupings = []
queue = deque(sector_pool)
skips = set()
while len(queue) > 0:
grouping = queue.popleft()
is_list = isinstance(grouping, List)
if not is_list and grouping in skips:
continue
grouping = grouping if is_list else [grouping]
hook_categories = categorize_groupings(grouping)
force_found = False
for val in Hook:
if val in hook_categories.keys():
required_doors, flexible_doors = hook_categories[val]
if len(required_doors) >= 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and len(hook_categories[opp][1]) > 0:
found_list.extend(hook_categories[opp][1])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and len(hooks[opp][1]) > 0:
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and len(other_sector_cats[opp][1]) > 0:
found_list.extend(other_sector_cats[opp][1])
if len(required_doors) == len(found_list):
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and len(cats[opp][1]) > 0:
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping = list(set(grouping).union(set(merge)))
groupings.remove(merge)
queue.append(grouping)
force_found = True
elif len(flexible_doors) == 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and (len(hook_categories[opp][0]) > 0 or len(hook_categories[opp][1]) > 0):
found_list.extend(hook_categories[opp][0])
found_list.extend([x for x in hook_categories[opp][1] if x not in flexible_doors])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and (len(hooks[opp][0]) > 0 or len(hooks[opp][1]) > 0):
found_list.extend(hooks[opp][0])
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and (len(other_sector_cats[opp][0]) > 0 or len(other_sector_cats[opp][1]) > 0):
found_list.extend(other_sector_cats[opp][0])
found_list.extend(other_sector_cats[opp][1])
if len(found_list) == 1:
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and (len(cats[opp][0]) > 0 or len(cats[opp][1]) > 0):
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping += merge
groupings.remove(merge)
queue.append(grouping)
force_found = True
if force_found:
break
if not force_found:
groupings.append(grouping)
return groupings
def categorize_groupings(sectors):
hook_categories = {}
for sector in sectors:
for door in sector.outstanding_doors:
hook = hook_from_door(door)
if hook not in hook_categories.keys():
hook_categories[hook] = ([], [])
if door.blocked or door.dead:
hook_categories[hook][0].append(door)
else:
hook_categories[hook][1].append(door)
return hook_categories
def valid_assignment(builder, sector_list, builder_info):
if not valid_entrance(builder, sector_list, builder_info):
return False
if not valid_c_switch(builder, sector_list):
return False
if not valid_polarized_assignment(builder, sector_list):
return False
resolved, problems = check_for_valid_layout(builder, sector_list, builder_info)
return resolved
def valid_entrance(builder, sector_list, builder_info):
is_dead_end = False
if len(builder.sectors) == 0:
is_dead_end = True
else:
entrances, splits, c_tuple, world, player = builder_info
if builder.name not in entrances.keys():
name_parts = builder.name.rsplit(' ', 1)
entrance_list = splits[name_parts[0]][name_parts[1]]
entrances = []
for sector in builder.sectors:
if sector.is_entrance_sector():
sector.region_set()
entrances.append(sector)
all_dead = True
for sector in entrances:
for region in entrance_list:
if region in sector.region_set():
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == region), None)
if portal and not portal.deadEnd:
all_dead = False
break
if not all_dead:
break
is_dead_end = all_dead
return len(sector_list) == 0 if is_dead_end else True
def valid_c_switch(builder, sector_list):
if builder.c_switch_present:
return True
for sector in sector_list:
if sector.c_switch:
return True
if builder.c_switch_required:
return False
for sector in sector_list:
if sector.blue_barrier:
return False
return True
def valid_connected_assignment(builder, sector_list):
full_list = sector_list + builder.sectors
if len(full_list) == 1 and sum_magnitude(full_list) == [0, 0, 0]:
return True
for sector in full_list:
if sector.is_entrance_sector():
continue
others = [x for x in full_list if x != sector]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
hookable = False
for i in range(len(sector_mag)):
if sector_mag[i] > 0 and other_mag[i] > 0:
hookable = True
if not hookable:
return False
return True
def valid_branch_assignment(builder, sector_list):
if not valid_connected_assignment(builder, sector_list):
return False
return valid_branch_only(builder, sector_list)
def valid_branch_only(builder, sector_list):
forced_loops = calc_forced_loops(builder.sectors + sector_list)
ttl_deads = 0
ttl_branches = 0
for s in sector_list:
ttl_deads += s.dead_ends()
ttl_branches += s.branches()
return builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance
def valid_polarized_assignment(builder, sector_list):
if not valid_branch_assignment(builder, sector_list):
return False
return (sum_polarity(sector_list) + sum_polarity(builder.sectors)).is_neutral()
def assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info):
comb_w_replace = len(dungeon_map) ** len(neutral_sectors)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_sectors)))
random.shuffle(combinations)
tries = 0
while len(neutral_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found for "neutral" sectors. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_sectors))
neutral_sector_list = list(neutral_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(neutral_sector_list[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, neutral_sectors, global_pole)
tries += 1
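# Splits a multi-entrance dungeon builder into one sub-builder per entrance group and
# distributes its sectors among them; repeated failures trigger retries that merge
# progressively more entrance groups into a single sub-builder.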
def split_dungeon_builder(builder, split_list, builder_info):
if builder.split_dungeon_map and len(builder.exception_list) == 0:
for name, proposal in builder.valid_proposal.items():
builder.split_dungeon_map[name].valid_proposal = proposal
return builder.split_dungeon_map
attempts, comb_w_replace, merge_attempt, merge_limit = 0, None, 0, len(split_list) - 1
    while attempts < 5:
        try:
candidate_sectors = dict.fromkeys(builder.sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map, sub_builder, merge_keys = {}, None, []
if merge_attempt > 0:
candidates = []
for name, split_entrances in split_list.items():
if len(split_entrances) > 1:
candidates.append(name)
continue
elif len(split_entrances) <= 0:
continue
ents, splits, c_tuple, world, player = builder_info
r_name = split_entrances[0]
p = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == r_name), None)
if p and not p.deadEnd:
candidates.append(name)
merge_keys = random.sample(candidates, merge_attempt+1) if len(candidates) >= merge_attempt+1 else []
for name, split_entrances in split_list.items():
key = builder.name + ' ' + name
if merge_keys and name in merge_keys:
other_keys = [builder.name + ' ' + x for x in merge_keys if x != name]
other_key = next((x for x in other_keys if x in dungeon_map), None)
if other_key:
key = other_key
sub_builder = dungeon_map[other_key]
sub_builder.all_entrances.extend(split_entrances)
if key not in dungeon_map:
dungeon_map[key] = sub_builder = DungeonBuilder(key)
sub_builder.split_flag = True
sub_builder.all_entrances = list(split_entrances)
for r_name in split_entrances:
assign_sector(find_sector(r_name, candidate_sectors), sub_builder, candidate_sectors, global_pole)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
return balance_split(candidate_sectors, dungeon_map, global_pole, builder_info)
except (GenerationException, NeutralizingException):
if comb_w_replace and comb_w_replace <= 10000:
                attempts += 5
            else:
attempts += 1
if attempts >= 5 and merge_attempt < merge_limit:
merge_attempt, attempts = merge_attempt + 1, 0
raise GenerationException('Unable to resolve in 5 attempts')
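# Distributes the remaining candidate sectors among the split sub-builders: small search
# spaces are brute-forced over all possible assignments, otherwise sectors are categorized
# and fed through the same forced-assignment/crystal/polarity pipeline used for whole dungeons.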
def balance_split(candidate_sectors, dungeon_map, global_pole, builder_info):
dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player = builder_info
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
if comb_w_replace <= 10000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(candidate_sectors)))
random.shuffle(combinations)
tries = 0
while tries < len(combinations):
choices = combinations[tries]
main_sector_list = list(candidate_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(main_sector_list[i])
all_valid = True
for name, builder in dungeon_map.items():
if not valid_assignment(builder, chosen_sectors[name], builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, candidate_sectors, global_pole)
return dungeon_map
tries += 1
raise GenerationException('Split Dungeon Builder: Impossible dungeon. Ref %s' % next(iter(dungeon_map.keys())))
check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole)
check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole)
check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole)
crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors = categorize_sectors(candidate_sectors)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers,
global_pole, len(crystal_barriers) > 0)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
return dungeon_map
def check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole):
dead_end_sectors = [x for x in candidate_sectors if x.branching_factor() <= 1]
other_sectors = [x for x in candidate_sectors if x not in dead_end_sectors]
for name, builder in dungeon_map.items():
other_sectors += builder.sectors
other_magnitude = sum_hook_magnitude(other_sectors)
dead_cnt = [0] * len(Hook)
for sector in dead_end_sectors:
hook_mag = sector.hook_magnitude()
for hook in Hook:
if hook_mag[hook.value] != 0:
dead_cnt[hook.value] += 1
for hook in Hook:
opp = opposite_h_type(hook).value
if dead_cnt[hook.value] > other_magnitude[opp]:
raise GenerationException('Impossible to satisfy all these dead ends')
elif dead_cnt[hook.value] == other_magnitude[opp]:
candidates = [x for x in dead_end_sectors if x.hook_magnitude()[hook.value] > 0]
for sector in other_sectors:
if sector.hook_magnitude()[opp] > 0 and sector.is_entrance_sector() and sector.branching_factor() == 2:
builder = None
for b in dungeon_map.values():
if sector in b.sectors:
builder = b
break
valid, candidate_sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Split Dungeon Builder: Bad dead end %s' % builder.name)
candidate_sector = random.choice(candidates)
candidates.remove(candidate_sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [candidate_sector]) and check_crystal(candidate_sector, sector)
assign_sector(candidate_sector, builder, candidate_sectors, global_pole)
builder.c_locked = True
def check_crystal(dead_end, entrance):
if dead_end.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
if entrance.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
return True
def check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole):
done = False
while not done:
done = True
magnitude = sum_hook_magnitude(candidate_sectors)
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = sum_hook_magnitude(builder.sectors)
for val in Hook:
if magnitude[val.value] == 1:
forced_sector = None
for sec in candidate_sectors:
if sec.hook_magnitude()[val.value] > 0:
forced_sector = sec
break
opp = opposite_h_type(val).value
other_sectors = [x for x in candidate_sectors if x != forced_sector]
if sum_hook_magnitude(other_sectors)[opp] == 0:
found_hooks = []
for name, hooks in dungeon_hooks.items():
if hooks[opp] > 0 and not dungeon_map[name].c_locked:
found_hooks.append(name)
if len(found_hooks) == 1:
done = False
assign_sector(forced_sector, dungeon_map[found_hooks[0]], candidate_sectors, global_pole)
def check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole):
for name, builder in dungeon_map.items():
if check_for_forced_crystal_single(builder, candidate_sectors):
builder.c_switch_required = True
def check_for_forced_crystal_single(builder, candidate_sectors):
builder_doors = defaultdict(dict)
for sector in builder.sectors:
for door in sector.outstanding_doors:
builder_doors[hook_from_door(door)][door] = sector
if len(builder_doors) == 0:
return False
candidate_doors = defaultdict(dict)
for sector in candidate_sectors:
for door in sector.outstanding_doors:
candidate_doors[hook_from_door(door)][door] = sector
for hook in builder_doors.keys():
for door in builder_doors[hook].keys():
opp = opposite_h_type(hook)
if opp in builder_doors.keys():
for d, sector in builder_doors[opp].items():
if d != door and (not sector.blue_barrier or sector.c_switch):
return False
for d, sector in candidate_doors[opp].items():
if not sector.blue_barrier or sector.c_switch:
return False
return True
def categorize_sectors(candidate_sectors):
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
return crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors
class NeutralizingException(Exception):
pass
class GenerationException(Exception):
pass
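# Models one outstanding door as a cost/benefit equation: the cost is the hook type needed
# to connect to the door from outside, and the benefits are the other outstanding doors
# reachable once through it, with crystal-barrier requirements tracked per door.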
class DoorEquation:
def __init__(self, door):
self.door = door
self.cost = None, None
self.benefit = defaultdict(list)
self.required = False
self.access_id = None
self.c_switch = False
self.crystal_blocked = {}
self.entrance_flag = False
def copy(self):
eq = DoorEquation(self.door)
eq.cost = self.cost
for key, doors in self.benefit.items():
eq.benefit[key] = doors.copy()
eq.required = self.required
eq.c_switch = self.c_switch
eq.crystal_blocked = self.crystal_blocked.copy()
return eq
def total_cost(self):
return 0 if self.cost[0] is None else 1
def gross(self, current_access):
key, cost_door = self.cost
if key is None:
            crystal_access = current_access.door_access[None]
else:
crystal_access = None
for match_door, crystal in current_access.outstanding_doors.items():
if hook_from_door(match_door) == key:
if crystal_access is None or current_access._better_crystal(crystal_access, crystal):
crystal_access = crystal
ttl = 0
for key, door_list in self.benefit.items():
for door in door_list:
if door in current_access.outstanding_doors.keys() or door in current_access.proposed_connections.keys():
continue
if door in self.crystal_blocked.keys() and not self.c_switch:
if crystal_access == CrystalBarrier.Either or crystal_access == self.crystal_blocked[door]:
ttl += 1
else:
ttl += 1
return ttl
def profit(self, current_access):
return self.gross(current_access) - self.total_cost()
def neutral(self):
key, door = self.cost
if key is not None and len(self.benefit[key]) <= 0:
return False
return True
def neutral_profit(self):
key, door = self.cost
if key is not None:
if len(self.benefit[key]) < 1:
return False
if len(self.benefit[key]) > 1:
return True
return False
else:
return True
def can_cover_cost(self, current_access):
key, door = self.cost
if key is not None and current_access[key] < 1:
return False
return True
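# Running state for the equation solver: open hook counts, per-door crystal state, doors
# already reached, doors still blocked on a crystal requirement, and the connections
# proposed so far.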
class DungeonAccess:
def __init__(self):
self.access = defaultdict(int)
        self.door_access = {}
        self.door_sector_map = {}
        self.outstanding_doors = {}
self.blocked_doors = {}
self.door_access[None] = CrystalBarrier.Orange
self.proposed_connections = {}
self.reached_doors = set()
def can_cover_equation(self, equation):
key, door = equation.cost
if key is None:
return True
return self.access[key] >= 1
def can_pay(self, key):
if key is None:
return True
return self.access[key] >= 1
def adjust_for_equation(self, equation, sector):
if equation.cost[0] is None:
original_crystal = self.door_access[None]
for key, door_list in equation.benefit.items():
self.access[key] += len(door_list)
for door in door_list:
crystal_state = CrystalBarrier.Either if equation.c_switch else original_crystal
if crystal_state == CrystalBarrier.Either:
self.door_access[None] = CrystalBarrier.Either
self.door_access[door] = crystal_state
self.door_sector_map[door] = sector
self.outstanding_doors[door] = crystal_state
self.reached_doors.add(door)
else:
key, door = equation.cost
self.access[key] -= 1
            # find a matching connection
best_door, best_crystal = None, None
for match_door, crystal in self.outstanding_doors.items():
if hook_from_door(match_door) == key:
if best_door is None or self._better_crystal(best_crystal, crystal):
best_door = match_door
best_crystal = crystal
if best_door is None:
raise Exception('Something went terribly wrong I think')
# for match_door, crystal in self.blocked_doors.items():
# if hook_from_door(match_door) == key:
# if best_door is None or self._better_crystal(best_crystal, crystal):
# best_door = match_door
# best_crystal = crystal
self.door_sector_map[door] = sector
self.door_access[door] = best_crystal
self.reached_doors.add(door)
self.proposed_connections[door] = best_door
self.proposed_connections[best_door] = door
if best_door in self.outstanding_doors.keys():
del self.outstanding_doors[best_door]
elif best_door in self.blocked_doors.keys():
del self.blocked_doors[best_door]
self.reached_doors.add(best_door)
# todo: backpropagate crystal access
if equation.c_switch or best_crystal == CrystalBarrier.Either:
# if not equation.door.blocked:
self.door_access[door] = CrystalBarrier.Either
self.door_access[best_door] = CrystalBarrier.Either
queue = deque([best_door, door])
visited = set()
while len(queue) > 0:
next_door = queue.popleft()
visited.add(next_door)
curr_sector = self.door_sector_map[next_door]
next_eq = None
for eq in curr_sector.equations:
if eq.door == next_door:
next_eq = eq
break
if next_eq.entrance_flag:
crystal_state = self.door_access[next_door]
self.door_access[None] = crystal_state
for eq in curr_sector.equations:
cand_door = eq.door
crystal_state = self.door_access[None]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
if partner_door not in visited:
queue.append(partner_door)
else:
for key, door_list in next_eq.benefit.items():
for cand_door in door_list:
crystal_state = self.door_access[next_door]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door in self.blocked_doors.keys():
needed_crystal = self.blocked_doors[cand_door]
if meets_crystal_requirment(crystal_state, needed_crystal):
del self.blocked_doors[cand_door]
if cand_door != door:
self.access[key] += 1
self.outstanding_doors[cand_door] = crystal_state
self.door_access[cand_door] = crystal_state
self.reached_doors.add(cand_door)
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
queue.append(cand_door)
queue.append(partner_door)
for key, door_list in equation.benefit.items():
for door in door_list:
crystal_access = self.door_access[best_door]
can_access = True
if door in equation.crystal_blocked.keys():
if crystal_access == CrystalBarrier.Either or crystal_access == equation.crystal_blocked[door]:
crystal_access = equation.crystal_blocked[door]
else:
self.blocked_doors[door] = equation.crystal_blocked[door]
can_access = False
self.door_sector_map[door] = sector
if can_access and door not in self.reached_doors:
self.access[key] += 1
self.door_access[door] = crystal_access
self.outstanding_doors[door] = crystal_access
self.reached_doors.add(door)
def _better_crystal(self, current_champ, contender):
if current_champ == CrystalBarrier.Either:
return False
elif contender == CrystalBarrier.Either:
return True
elif current_champ == CrystalBarrier.Blue:
return False
elif contender == CrystalBarrier.Blue:
return True
else:
return False
def identify_branching_issues(dungeon_map, builder_info):
unconnected_builders = {}
for name, builder in dungeon_map.items():
resolved, unreached_doors = check_for_valid_layout(builder, [], builder_info)
if not resolved:
unconnected_builders[name] = builder
for hook, door_list in unreached_doors.items():
builder.unfulfilled[hook] += len(door_list)
return unconnected_builders
def check_for_valid_layout(builder, sector_list, builder_info):
dungeon_entrances, split_dungeon_entrances, c_tuple, world, player = builder_info
if builder.name in split_dungeon_entrances.keys():
try:
temp_builder = DungeonBuilder(builder.name)
for s in sector_list + builder.sectors:
assign_sector_helper(s, temp_builder)
split_list = split_dungeon_entrances[builder.name]
builder.split_dungeon_map = split_dungeon_builder(temp_builder, split_list, builder_info)
builder.valid_proposal = {}
possible_regions = set()
for portal in world.dungeon_portals[player]:
if not portal.destination and portal.name in dungeon_portals[builder.name]:
possible_regions.add(portal.door.entrance.parent_region.name)
if builder.name in dungeon_drops.keys():
possible_regions.update(dungeon_drops[builder.name])
for name, split_build in builder.split_dungeon_map.items():
name_bits = name.split(" ")
orig_name = " ".join(name_bits[:-1])
entrance_regions = split_dungeon_entrances[orig_name][name_bits[-1]]
# todo: this is hardcoded information for random entrances
for sector in split_build.sectors:
match_set = set(sector.region_set()).intersection(possible_regions)
if len(match_set) > 0:
for r_name in match_set:
if r_name not in entrance_regions:
entrance_regions.append(r_name)
# entrance_regions = [x for x in entrance_regions if x not in split_check_entrance_invalid]
proposal = generate_dungeon_find_proposal(split_build, entrance_regions, True, world, player)
# record split proposals
builder.valid_proposal[name] = proposal
builder.exception_list = list(sector_list)
return True, {}
except (GenerationException, NeutralizingException):
builder.split_dungeon_map = None
builder.valid_proposal = None
unreached_doors = resolve_equations(builder, sector_list)
return False, unreached_doors
else:
unreached_doors = resolve_equations(builder, sector_list)
return len(unreached_doors) == 0, unreached_doors
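# Simulates hooking up all door equations for the builder plus the proposed sectors and
# returns the doors/hook types that could never be paid for; an empty result means the
# layout is internally connectable.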
def resolve_equations(builder, sector_list):
unreached_doors = defaultdict(list)
equations = {x: y for x, y in copy_door_equations(builder, sector_list).items() if len(y) > 0}
current_access = {}
    sector_split = {}  # sectors pinned to a specific split section
if builder.name in split_region_starts.keys():
for name, region_list in split_region_starts[builder.name].items():
current_access[name] = DungeonAccess()
for r_name in region_list:
sector = find_sector(r_name, builder.sectors)
sector_split[sector] = name
else:
current_access[builder.name] = DungeonAccess()
    # first resolve all free equations (zero cost, they only add access)
free_sector, eq_list, free_eq = find_free_equation(equations)
while free_eq is not None:
if free_sector in sector_split.keys():
access_id = sector_split[free_sector]
access = current_access[access_id]
else:
access_id = next(iter(current_access.keys()))
access = current_access[access_id]
resolve_equation(free_eq, eq_list, free_sector, access_id, access, equations)
free_sector, eq_list, free_eq = find_free_equation(equations)
while len(equations) > 0:
valid_access = next_access(current_access)
eq, eq_list, sector, access, access_id = None, None, None, None, None
if len(valid_access) == 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_priority_equation(equations, access_id, access)
elif len(valid_access) > 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_greedy_equation(equations, access_id, access, sector_split)
if eq:
resolve_equation(eq, eq_list, sector, access_id, access, equations)
else:
for sector, eq_list in equations.items():
for eq in eq_list:
unreached_doors[hook_from_door(eq.door)].append(eq.door)
return unreached_doors
valid_access = next_access(current_access)
for access_id, dungeon_access in valid_access:
access = dungeon_access.access
access[Hook.Stairs] = access[Hook.Stairs] % 2
ns_leftover = min(access[Hook.North], access[Hook.South])
access[Hook.North] -= ns_leftover
access[Hook.South] -= ns_leftover
ew_leftover = min(access[Hook.West], access[Hook.East])
access[Hook.East] -= ew_leftover
access[Hook.West] -= ew_leftover
if sum(access.values()) > 0:
for hook, num in access.items():
for i in range(num):
unreached_doors[hook].append('placeholder')
return unreached_doors
def next_access(current_access):
valid_ones = [(x, y) for x, y in current_access.items() if sum(y.access.values()) > 0]
valid_ones.sort(key=lambda x: sum(x[1].access.values()))
return valid_ones
# Priority order for picking the next equation:
# 1. equations with no change to access (check)
# 2. the highest-benefit equations that can be paid for (check)
# 3. 0-benefit required transforms
# 4. 0-benefit transforms (how to pick between these?)
# 5. negative-benefit transforms (dead ends)
def find_priority_equation(equations, access_id, current_access):
flex = calc_flex(equations, current_access)
required = calc_required(equations, current_access)
wanted_candidates = []
best_profit = None
all_candidates = []
local_profit_map = {}
for sector, eq_list in equations.items():
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
# if eq.neutral_profit() or eq.neutral():
# return eq, eq_list, sector # don't need to compare - just use it now
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
all_candidates.append((eq, eq_list, sector))
elif (best_profit is None or profit >= best_profit) and profit > 0:
if best_profit is None or profit > best_profit:
wanted_candidates = [eq]
best_profit = profit
else:
wanted_candidates.append(eq)
local_profit_map[sector] = best_local_profit
filtered_candidates = filter_requirements(all_candidates, equations, required, current_access)
filtered_candidates = [x for x in filtered_candidates if x[0].gross(current_access) > 0]
if len(filtered_candidates) == 0:
        filtered_candidates = all_candidates
    if len(filtered_candidates) == 0:
        return None, None, None
    if len(filtered_candidates) == 1:
        return filtered_candidates[0]
neutral_candidates = [x for x in filtered_candidates if (x[0].neutral_profit() or x[0].neutral()) and x[0].profit(current_access) == local_profit_map[x[2]]]
if len(neutral_candidates) == 0:
neutral_candidates = filtered_candidates
if len(neutral_candidates) == 1:
return neutral_candidates[0]
filtered_candidates = filter_requirements(neutral_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = neutral_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
best_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if best_profit is None or profit >= best_profit:
if best_profit is None or profit > best_profit:
triplet_candidates = [(eq, eq_list, sector)]
best_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
filtered_candidates = filter_requirements(triplet_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = triplet_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
required_candidates = [x for x in filtered_candidates if x[0].required]
if len(required_candidates) == 0:
required_candidates = filtered_candidates
if len(required_candidates) == 1:
return required_candidates[0]
c_switch_candidates = [x for x in required_candidates if x[0].c_switch]
if len(c_switch_candidates) == 0:
c_switch_candidates = required_candidates
if len(c_switch_candidates) == 1:
return c_switch_candidates[0]
loop_candidates = find_enabling_switch_connections(current_access)
if len(loop_candidates) >= 1:
return loop_candidates[0] # just pick one
flexible_candidates = [x for x in c_switch_candidates if x[0].can_cover_cost(flex)]
if len(flexible_candidates) == 0:
flexible_candidates = c_switch_candidates
if len(flexible_candidates) == 1:
return flexible_candidates[0]
good_local_candidates = [x for x in flexible_candidates if local_profit_map[x[2]] == x[0].profit(current_access)]
if len(good_local_candidates) == 0:
good_local_candidates = flexible_candidates
if len(good_local_candidates) == 1:
return good_local_candidates[0]
leads_to_profit = [x for x in good_local_candidates if can_enable_wanted(x[0], wanted_candidates)]
if len(leads_to_profit) == 0:
leads_to_profit = good_local_candidates
if len(leads_to_profit) == 1:
return leads_to_profit[0]
cost_point = {x[0]: find_cost_point(x, current_access) for x in leads_to_profit}
best_point = max(cost_point.values())
cost_point_candidates = [x for x in leads_to_profit if cost_point[x[0]] == best_point]
if len(cost_point_candidates) == 0:
cost_point_candidates = leads_to_profit
return cost_point_candidates[0] # just pick one I guess
def find_enabling_switch_connections(current_access):
triad_list = []
# probably should check for loop/branches in builder at some stage
# - but this could indicate that a loop or branch is necessary
for cand_door, crystal in current_access.outstanding_doors.items():
for blocked_door, req_crystal in current_access.blocked_doors.items():
if hook_from_door(cand_door) == hanger_from_door(blocked_door):
if crystal == CrystalBarrier.Either or crystal == req_crystal:
sector, equation = current_access.door_sector_map[blocked_door], None
for eq in sector.equations:
if eq.door == blocked_door:
equation = eq.copy()
break
if equation:
triad_list.append((equation, [equation], sector))
return triad_list
def find_cost_point(eq_triplet, access):
cost_point = 0
key, cost_door = eq_triplet[0].cost
if cost_door is not None:
cost_point += access.access[key] - 1
return cost_point
def find_greedy_equation(equations, access_id, current_access, sector_split):
all_candidates = []
for sector, eq_list in equations.items():
if sector not in sector_split.keys() or sector_split[sector] == access_id:
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
for eq in eq_list:
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
all_candidates.append((eq, eq_list, sector))
if len(all_candidates) == 0:
return None, None, None # can't pay for anything
if len(all_candidates) == 1:
return all_candidates[0]
filtered_candidates = [x for x in all_candidates if x[0].profit(current_access) + 2 >= len(x[2].outstanding_doors)]
if len(filtered_candidates) == 0:
        filtered_candidates = all_candidates
    if len(filtered_candidates) == 1:
        return filtered_candidates[0]
triplet_candidates = []
worst_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if worst_profit is None or profit <= worst_profit:
if worst_profit is None or profit < worst_profit:
triplet_candidates = [(eq, eq_list, sector)]
worst_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
if len(triplet_candidates) == 0:
        triplet_candidates = filtered_candidates
    return triplet_candidates[0]
def calc_required(equations, current_access):
ttl = sum(current_access.access.values())
local_profit_map = {}
for sector, eq_list in equations.items():
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
local_profit_map[sector] = best_local_profit
ttl += best_local_profit
if ttl == 0:
new_lists = {}
for sector, eq_list in equations.items():
if len(eq_list) > 1:
rem_list = []
for eq in eq_list:
if eq.profit(current_access) < local_profit_map[sector]:
rem_list.append(eq)
if len(rem_list) > 0:
new_lists[sector] = [x for x in eq_list if x not in rem_list]
for sector, eq_list in new_lists.items():
if len(eq_list) <= 1:
for eq in eq_list:
eq.required = True
equations[sector] = eq_list
required_costs = defaultdict(int)
required_benefits = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key, door_list in eq.benefit.items():
required_benefits[key] += len(door_list)
return required_costs, required_benefits
def calc_flex(equations, current_access):
flex_spending = defaultdict(int)
required_costs = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key in Hook:
flex_spending[key] = max(0, current_access.access[key]-required_costs[key])
return flex_spending
def filter_requirements(triplet_candidates, equations, required, current_access):
r_costs, r_exits = required
valid_candidates = []
for cand, cand_list, cand_sector in triplet_candidates:
valid = True
if not cand.required and not cand.c_switch:
potential_benefit = defaultdict(int)
benefit_counted = set()
potential_costs = defaultdict(int)
for h_type, benefit in current_access.access.items():
cur_cost = 1 if cand.cost[0] is not None else 0
if benefit - cur_cost > 0:
potential_benefit[h_type] += benefit - cur_cost
for h_type, benefit_list in cand.benefit.items():
potential_benefit[h_type] += len(benefit_list)
for sector, eq_list in equations.items():
if sector == cand_sector:
affected_doors = [d for x in cand.benefit.values() for d in x] + [cand.cost[1]]
adj_list = [x for x in eq_list if x.door not in affected_doors]
else:
adj_list = eq_list
for eq in adj_list:
for h_type, benefit_list in eq.benefit.items():
total_benefit = set(benefit_list) - benefit_counted
potential_benefit[h_type] += len(total_benefit)
benefit_counted.update(benefit_list)
h_type, cost_door = eq.cost
potential_costs[h_type] += 1
for h_type, requirement in r_costs.items():
if requirement > 0 and potential_benefit[h_type] < requirement:
valid = False
break
if valid:
for h_type, requirement in r_exits.items():
if requirement > 0 and potential_costs[h_type] < requirement:
valid = False
break
if valid:
valid_candidates.append((cand, cand_list, cand_sector))
return valid_candidates
def can_enable_wanted(test_eq, wanted_candidates):
for wanted in wanted_candidates:
covered = True
key, cost_door = wanted.cost
if len(test_eq.benefit[key]) < 1:
covered = False
if covered:
return True
return False
def resolve_equation(equation, eq_list, sector, access_id, current_access, equations):
if not current_access.can_pay(equation.cost[0]):
raise GenerationException('Cannot pay for this connection')
current_access.adjust_for_equation(equation, sector)
eq_list.remove(equation)
reached_doors = set(current_access.reached_doors)
reached_doors.update(current_access.blocked_doors.keys())
for r_eq in list(eq_list):
all_benefits_met = r_eq.door in reached_doors
for key in Hook:
fringe_list = [x for x in r_eq.benefit[key] if x not in reached_doors]
r_eq.benefit[key] = fringe_list
if len(fringe_list) > 0:
all_benefits_met = False
if all_benefits_met:
eq_list.remove(r_eq)
if len(eq_list) == 0 and sector in equations.keys():
del equations[sector]
else:
for eq in eq_list:
eq.access_id = access_id
def find_free_equation(equations):
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.total_cost() <= 0:
return sector, eq_list, eq
return None, None, None
def copy_door_equations(builder, sector_list):
equations = {}
for sector in builder.sectors + sector_list:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
curr_list = equations[sector] = []
for equation in sector.equations:
curr_list.append(equation.copy())
return equations
def calc_sector_equations(sector):
equations = []
is_entrance = sector.is_entrance_sector() and not sector.destination_entrance
if is_entrance:
flagged_equations = []
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, True)
if flag:
flagged_equations.append(equation)
equations.append(equation)
for flagged_equation in flagged_equations:
for equation in equations:
for key, door_list in equation.benefit.items():
if flagged_equation.door in door_list and flagged_equation != equation:
door_list.remove(flagged_equation.door)
else:
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, False)
equations.append(equation)
return equations
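# Builds the equation for a single door by flood-filling the regions beyond it, collecting
# reachable outstanding doors as benefits and recording any crystal-barrier state required
# to reach each one.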
def calc_door_equation(door, sector, look_for_entrance):
if look_for_entrance and not door.blocked:
flag = sector.is_entrance_sector()
if flag:
eq = DoorEquation(door)
eq.benefit[hook_from_door(door)].append(door)
eq.required = True
eq.c_switch = door.crystal == CrystalBarrier.Either
eq.entrance_flag = True
return eq, flag
eq = DoorEquation(door)
eq.required = door.blocked or door.dead
eq.cost = (hanger_from_door(door), door)
eq.entrance_flag = sector.is_entrance_sector()
if not door.stonewall:
start_region = door.entrance.parent_region
visited = {(start_region, CrystalBarrier.Null)}
queue = deque([(start_region, CrystalBarrier.Null)])
found_events = set()
event_doors = set()
while len(queue) > 0:
region, crystal_barrier = queue.popleft()
if region.crystal_switch and crystal_barrier == CrystalBarrier.Null:
eq.c_switch = True
crystal_barrier = CrystalBarrier.Either
for loc in region.locations:
if loc.name in dungeon_events:
found_events.add(loc.name)
for d in event_doors:
if loc.name == d.req_event:
connect = d.entrance.connected_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
for ext in region.exits:
d = ext.door
if d is not None:
if d.controller is not None:
d = d.controller
if d is not door and d in sector.outstanding_doors and not d.blocked:
eq_list = eq.benefit[hook_from_door(d)]
if d not in eq_list:
eq_list.append(d)
crystal_barrier = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
if crystal_barrier != CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
else:
eq.crystal_blocked[d] = crystal_barrier
elif d.crystal == CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
if d.req_event is not None and d.req_event not in found_events:
event_doors.add(d)
else:
connect = ext.connected_region if ext.door.controller is None else d.entrance.parent_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
if len(eq.benefit) == 0:
eq.required = True
return eq, False
def meets_crystal_requirment(current_crystal, requirement):
if current_crystal == CrystalBarrier.Either:
return True
return current_crystal == requirement
def valid_crystal(door, current_crystal):
if door.crystal in [CrystalBarrier.Null, CrystalBarrier.Either]:
return True
if current_crystal in [CrystalBarrier.Either, CrystalBarrier.Null]:
return True
return door.crystal == current_crystal
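# Returns the k-th r-combination of l in the lexicographic order induced by l's ordering,
# e.g. kth_combination(0, ['a', 'b', 'c'], 2) == ['a', 'b'].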
def kth_combination(k, l, r):
if r == 0:
return []
elif len(l) == r:
return l
else:
i = ncr(len(l) - 1, r - 1)
if k < i:
return l[0:1] + kth_combination(k, l[1:], r - 1)
else:
return kth_combination(k - i, l[1:], r)
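# Binomial coefficient ("n choose r"), e.g. ncr(5, 2) == 10.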
def ncr(n, r):
if r == 0:
return 1
r = min(r, n - r)
numerator = reduce(op.mul, range(n, n - r, -1), 1)
denominator = reduce(op.mul, range(1, r + 1), 1)
    return numerator // denominator  # floor division is exact here and avoids float precision loss
dungeon_boss_sectors = {
'Hyrule Castle': [],
'Eastern Palace': ['Eastern Boss'],
'Desert Palace': ['Desert Boss'],
'Tower of Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull Boss'],
'Thieves Town': ['Thieves Blind\'s Cell', 'Thieves Boss'],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
default_dungeon_entrances = {
'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Rat Path',
'Sanctuary'],
'Eastern Palace': ['Eastern Lobby'],
'Desert Palace': ['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby'],
'Tower of Hera': ['Hera Lobby'],
'Agahnims Tower': ['Tower Lobby'],
'Palace of Darkness': ['PoD Lobby'],
'Swamp Palace': ['Swamp Lobby'],
'Skull Woods': ['Skull 1 Lobby', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull 2 East Lobby',
'Skull 2 West Lobby', 'Skull Back Drop', 'Skull 3 Lobby'],
'Thieves Town': ['Thieves Lobby'],
'Ice Palace': ['Ice Lobby'],
'Misery Mire': ['Mire Lobby'],
'Turtle Rock': ['TR Main Lobby', 'TR Eye Bridge', 'TR Big Chest Entrance', 'TR Lazy Eyes'],
'Ganons Tower': ['GT Lobby']
}
drop_entrances = {
'Hyrule Castle': ['Sewers Rat Path'],
'Eastern Palace': [],
'Desert Palace': [],
'Tower of Hera': [],
'Agahnims Tower': [],
'Palace of Darkness': [],
'Swamp Palace': [],
'Skull Woods': ['Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'],
'Thieves Town': [],
'Ice Palace': [],
'Misery Mire': [],
'Turtle Rock': [],
'Ganons Tower': []
}
# todo: calculate these for ER - the multi entrance dungeons anyway
dungeon_dead_end_allowance = {
'Hyrule Castle': 6,
'Eastern Palace': 1,
'Desert Palace': 2,
'Tower of Hera': 1,
'Agahnims Tower': 1,
'Palace of Darkness': 1,
'Swamp Palace': 1,
'Skull Woods': 3, # two allowed in skull 1, 1 in skull 3, 0 in skull 2
'Thieves Town': 1,
'Ice Palace': 1,
'Misery Mire': 1,
'Turtle Rock': 2, # this assumes one overworld connection
'Ganons Tower': 1,
'Desert Palace Back': 1,
'Desert Palace Main': 1,
'Skull Woods 1': 0,
'Skull Woods 2': 0,
'Skull Woods 3': 1,
}
drop_entrances_allowance = [
'Sewers Rat Path', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'
]
dead_entrances = [
'TR Big Chest Entrance'
]
split_check_entrance_invalid = [
'Desert East Lobby', 'Skull 2 West Lobby'
]
dungeon_portals = {
'Hyrule Castle': ['Hyrule Castle South', 'Hyrule Castle West', 'Hyrule Castle East', 'Sanctuary'],
'Eastern Palace': ['Eastern'],
'Desert Palace': ['Desert Back', 'Desert South', 'Desert West', 'Desert East'],
'Tower of Hera': ['Hera'],
'Agahnims Tower': ['Agahnims Tower'],
'Palace of Darkness': ['Palace of Darkness'],
'Swamp Palace': ['Swamp'],
'Skull Woods': ['Skull 1', 'Skull 2 East', 'Skull 2 West', 'Skull 3'],
'Thieves Town': ['Thieves Town'],
'Ice Palace': ['Ice'],
'Misery Mire': ['Mire'],
'Turtle Rock': ['Turtle Rock Main', 'Turtle Rock Lazy Eyes', 'Turtle Rock Chest', 'Turtle Rock Eye Bridge'],
'Ganons Tower': ['Ganons Tower']
}
dungeon_drops = {
'Hyrule Castle': ['Sewers Rat Path'],
'Skull Woods': ['Skull Pot Circle', 'Skull Pinball', 'Skull Left Drop', 'Skull Back Drop'],
}